// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 *
 * Bucket states:
 * - free bucket: mark == 0
 *   The bucket contains no data and will not be read
 *
 * - allocator bucket: owned_by_allocator == 1
 *   The bucket is on a free list, or it is an open bucket
 *
 * - cached bucket: owned_by_allocator == 0 &&
 *   dirty_sectors == 0 &&
 *   cached_sectors > 0
 *   The bucket contains data but may be safely discarded, as there are
 *   enough replicas of the data on other cache devices, or it has been
 *   written back to the backing device
 *
 * - dirty bucket: owned_by_allocator == 0 &&
 *   dirty_sectors > 0
 *   The bucket contains data that we must not discard (either the only copy,
 *   or one of the 'main copies' for data requiring multiple replicas)
 *
 * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
 *   This is a btree node, journal or gen/prio bucket
 *
 * Lifecycle:
 *
 * bucket invalidated => bucket on freelist => open bucket =>
 *     [dirty bucket =>] cached bucket => bucket invalidated => ...
 *
 * Note that cache promotion can skip the dirty bucket step, as data
 * is copied from a deeper tier to a shallower tier, onto a cached
 * bucket.
 * Note also that a cached bucket can spontaneously become dirty --
 * see below.
 *
 * Only a traversal of the key space can determine whether a bucket is
 * truly dirty or cached.
 *
 * Transitions:
 *
 * - free => allocator: bucket was invalidated
 * - cached => allocator: bucket was invalidated
 *
 * - allocator => dirty: open bucket was filled up
 * - allocator => cached: open bucket was filled up
 * - allocator => metadata: metadata was allocated
 *
 * - dirty => cached: dirty sectors were copied to a deeper tier
 * - dirty => free: dirty sectors were overwritten or moved (copy gc)
 * - cached => free: cached sectors were overwritten
 *
 * - metadata => free: metadata was freed
 *
 * Oddities:
 * - cached => dirty: a device was removed so formerly replicated data
 *   is no longer sufficiently replicated
 * - free => cached: cannot happen
 * - free => dirty: cannot happen
 * - free => metadata: cannot happen
 */
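
/*
 * Illustrative only: a minimal standalone sketch of the state
 * classification described above. The struct below is a simplified
 * stand-in for struct bucket_mark, not the real type; field names mirror
 * the description, and the whole block is compiled out.
 */
#if 0
struct example_mark {
	unsigned	owned_by_allocator:1;
	unsigned	is_metadata:1;
	u16		dirty_sectors;
	u16		cached_sectors;
};

enum example_state { S_FREE, S_ALLOCATOR, S_METADATA, S_DIRTY, S_CACHED };

static enum example_state example_classify(struct example_mark m)
{
	if (m.owned_by_allocator)
		return S_ALLOCATOR;	/* on a freelist, or an open bucket */
	if (m.is_metadata)
		return S_METADATA;	/* btree node, journal, gen/prio */
	if (m.dirty_sectors)
		return S_DIRTY;		/* must not be discarded */
	if (m.cached_sectors)
		return S_CACHED;	/* discardable, replicated elsewhere */
	return S_FREE;			/* mark == 0 */
}
#endif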
#include "bcachefs.h"
#include "alloc_background.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "ec.h"
#include "error.h"
#include "movinggc.h"
#include "replicas.h"

#include <linux/preempt.h>
#include <trace/events/bcachefs.h>

static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
					      enum bch_data_type data_type,
					      s64 sectors)
{
	switch (data_type) {
	case BCH_DATA_btree:
		fs_usage->btree += sectors;
		break;
	case BCH_DATA_user:
		fs_usage->data += sectors;
		break;
	case BCH_DATA_cached:
		fs_usage->cached += sectors;
		break;
	}
}

/*
 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
 * wraparound:
 */
void bch2_bucket_seq_cleanup(struct bch_fs *c)
{
	u64 journal_seq = atomic64_read(&c->journal.seq);
	u16 last_seq_ondisk = c->journal.last_seq_ondisk;
	struct bch_dev *ca;
	struct bucket_array *buckets;
	struct bucket *g;
	struct bucket_mark m;
	unsigned i;

	if (journal_seq - c->last_bucket_seq_cleanup <
	    (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
		return;

	c->last_bucket_seq_cleanup = journal_seq;

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets) {
			bucket_cmpxchg(g, m, ({
				if (!m.journal_seq_valid ||
				    bucket_needs_journal_commit(m, last_seq_ondisk))
					break;

				m.journal_seq_valid = 0;
			}));
		}
		up_read(&ca->bucket_lock);
	}
}
void bch2_fs_usage_initialize(struct bch_fs *c)
{
	struct bch_fs_usage *usage;
	unsigned i;

	percpu_down_write(&c->mark_lock);
	usage = c->usage_base;

	bch2_fs_usage_acc_to_base(c, 0);
	bch2_fs_usage_acc_to_base(c, 1);

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->reserved += usage->persistent_reserved[i];

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
	}

	percpu_up_write(&c->mark_lock);
}

void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
{
	if (fs_usage == c->usage_scratch)
		mutex_unlock(&c->usage_scratch_lock);
	else
		kfree(fs_usage);
}

struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
{
	struct bch_fs_usage *ret;
	unsigned bytes = fs_usage_u64s(c) * sizeof(u64);

	ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
	if (ret)
		return ret;

	if (mutex_trylock(&c->usage_scratch_lock))
		goto out_pool;

	ret = kzalloc(bytes, GFP_NOFS);
	if (ret)
		return ret;

	mutex_lock(&c->usage_scratch_lock);
out_pool:
	ret = c->usage_scratch;
	memset(ret, 0, bytes);
	return ret;
}

struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	memset(&ret, 0, sizeof(ret));
	acc_u64s_percpu((u64 *) &ret,
			(u64 __percpu *) ca->usage[0],
			sizeof(ret) / sizeof(u64));

	return ret;
}
static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & 1]);
}
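
/*
 * Usage deltas are double buffered by journal sequence parity: deltas for
 * an even journal seq accumulate in c->usage[0], odd in c->usage[1], and
 * bch2_fs_usage_acc_to_base() folds one buffer into c->usage_base while
 * the other continues absorbing updates (gc instead accumulates into
 * c->usage_gc).
 */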
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v +
			percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
			percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
{
	struct bch_fs_usage *ret;
	unsigned seq, v, u64s = fs_usage_u64s(c);
retry:
	ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
	if (unlikely(!ret))
		return NULL;

	percpu_down_read(&c->mark_lock);

	v = fs_usage_u64s(c);
	if (unlikely(u64s != v)) {
		u64s = v;
		percpu_up_read(&c->mark_lock);
		kfree(ret);
		goto retry;
	}

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(ret, c->usage_base, u64s * sizeof(u64));
		acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
		acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
	unsigned u64s = fs_usage_u64s(c);

	BUG_ON(idx >= 2);

	preempt_disable();
	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	write_seqcount_end(&c->usage_lock);
	preempt_enable();
}
void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs *c,
			   struct bch_fs_usage *fs_usage)
{
	unsigned i;

	pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);

	pr_buf(out, "hidden:\t\t\t\t%llu\n",
	       fs_usage->hidden);
	pr_buf(out, "data:\t\t\t\t%llu\n",
	       fs_usage->data);
	pr_buf(out, "cached:\t\t\t\t%llu\n",
	       fs_usage->cached);
	pr_buf(out, "reserved:\t\t\t%llu\n",
	       fs_usage->reserved);
	pr_buf(out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->nr_inodes);
	pr_buf(out, "online reserved:\t\t%llu\n",
	       fs_usage->online_reserved);

	for (i = 0;
	     i < ARRAY_SIZE(fs_usage->persistent_reserved);
	     i++) {
		pr_buf(out, "%u replicas:\n", i + 1);
		pr_buf(out, "\treserved:\t\t%llu\n",
		       fs_usage->persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		pr_buf(out, "\t");
		bch2_replicas_entry_to_text(out, e);
		pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
	}
}
#define RESERVE_FACTOR	6

static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

static u64 avail_factor(u64 r)
{
	return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}
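
/*
 * Worked example (RESERVE_FACTOR == 6, i.e. ~1/64 held back): for r = 1000,
 * reserve_factor(1000) = 1000 + (round_up(1000, 64) >> 6)
 * = 1000 + (1024 >> 6) = 1016. Going the other way,
 * avail_factor(1016) = (1016 << 6) / 65 = 65024 / 65 = 1000 (integer
 * division), so avail_factor() approximately inverts reserve_factor().
 */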
u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
{
	return min(fs_usage->hidden +
		   fs_usage->btree +
		   fs_usage->data +
		   reserve_factor(fs_usage->reserved +
				  fs_usage->online_reserved),
		   c->capacity);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->hidden);

	data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
		bch2_fs_usage_read_one(c, &c->usage_base->btree);
	reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
		bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);

	ret.used = min(ret.capacity, data + reserve_factor(reserved));
	ret.free = ret.capacity - ret.used;

	ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

static inline int is_unavailable_bucket(struct bucket_mark m)
{
	return !is_available_bucket(m);
}

static inline int is_fragmented_bucket(struct bucket_mark m,
				       struct bch_dev *ca)
{
	if (!m.owned_by_allocator &&
	    m.data_type == BCH_DATA_user &&
	    bucket_sectors_used(m))
		return max_t(int, 0, (int) ca->mi.bucket_size -
			     bucket_sectors_used(m));
	return 0;
}

static inline int is_stripe_data_bucket(struct bucket_mark m)
{
	return m.stripe && m.data_type != BCH_DATA_parity;
}

static inline int bucket_stripe_sectors(struct bucket_mark m)
{
	return is_stripe_data_bucket(m) ? m.dirty_sectors : 0;
}

static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
	return m.cached_sectors && !m.dirty_sectors
		? BCH_DATA_cached
		: m.data_type;
}

static bool bucket_became_unavailable(struct bucket_mark old,
				      struct bucket_mark new)
{
	return is_available_bucket(old) &&
	       !is_available_bucket(new);
}
int bch2_fs_usage_apply(struct bch_fs *c,
			struct bch_fs_usage *fs_usage,
			struct disk_reservation *disk_res,
			unsigned journal_seq)
{
	s64 added = fs_usage->data + fs_usage->reserved;
	s64 should_not_have_added;
	int ret = 0;

	percpu_rwsem_assert_held(&c->mark_lock);

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
	if (WARN_ONCE(should_not_have_added > 0,
		      "disk usage increased by %lli more than reservation of %llu",
		      added, disk_res ? disk_res->sectors : 0)) {
		atomic64_sub(should_not_have_added, &c->sectors_available);
		added -= should_not_have_added;
		ret = -1;
	}

	if (added > 0) {
		disk_res->sectors -= added;
		fs_usage->online_reserved -= added;
	}

	preempt_disable();
	acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
		 (u64 *) fs_usage, fs_usage_u64s(c));
	preempt_enable();

	return ret;
}

static inline void account_bucket(struct bch_fs_usage *fs_usage,
				  struct bch_dev_usage *dev_usage,
				  enum bch_data_type type,
				  int nr, s64 size)
{
	if (type == BCH_DATA_sb || type == BCH_DATA_journal)
		fs_usage->hidden += size;

	dev_usage->buckets[type] += nr;
}
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
				  struct bch_fs_usage *fs_usage,
				  struct bucket_mark old, struct bucket_mark new,
				  bool gc)
{
	struct bch_dev_usage *u;

	percpu_rwsem_assert_held(&c->mark_lock);

	preempt_disable();
	u = this_cpu_ptr(ca->usage[gc]);

	if (bucket_type(old))
		account_bucket(fs_usage, u, bucket_type(old),
			       -1, -ca->mi.bucket_size);

	if (bucket_type(new))
		account_bucket(fs_usage, u, bucket_type(new),
			       1, ca->mi.bucket_size);

	u->buckets_alloc +=
		(int) new.owned_by_allocator - (int) old.owned_by_allocator;
	u->buckets_unavailable +=
		is_unavailable_bucket(new) - is_unavailable_bucket(old);

	u->buckets_ec += (int) new.stripe - (int) old.stripe;
	u->sectors_ec += bucket_stripe_sectors(new) -
			 bucket_stripe_sectors(old);

	u->sectors[old.data_type] -= old.dirty_sectors;
	u->sectors[new.data_type] += new.dirty_sectors;
	u->sectors[BCH_DATA_cached] +=
		(int) new.cached_sectors - (int) old.cached_sectors;
	u->sectors_fragmented +=
		is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
	preempt_enable();

	if (!is_available_bucket(old) && is_available_bucket(new))
		bch2_wake_allocator(ca);
}
void bch2_dev_usage_from_buckets(struct bch_fs *c)
{
	struct bch_dev *ca;
	struct bucket_mark old = { .v.counter = 0 };
	struct bucket_array *buckets;
	struct bucket *g;
	unsigned i;
	int cpu;

	c->usage_base->hidden = 0;

	for_each_member_device(ca, c, i) {
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(ca->usage[0], cpu), 0,
			       sizeof(*ca->usage[0]));

		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			bch2_dev_usage_update(c, ca, c->usage_base,
					      old, g->mark, false);
	}
}
static inline int update_replicas(struct bch_fs *c,
				  struct bch_fs_usage *fs_usage,
				  struct bch_replicas_entry *r,
				  s64 sectors)
{
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	if (!fs_usage)
		return 0;

	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx] += sectors;
	return 0;
}

static inline void update_cached_sectors(struct bch_fs *c,
					 struct bch_fs_usage *fs_usage,
					 unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas(c, fs_usage, &r.e, sectors);
}
static struct replicas_delta_list *
replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	unsigned new_size = d ? (d->size + more) * 2 : 128;

	if (!d || d->used + more > d->size) {
		d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
		BUG_ON(!d);

		d->size = new_size;
		trans->fs_usage_deltas = d;
	}

	return d;
}

static inline void update_replicas_list(struct btree_trans *trans,
					struct bch_replicas_entry *r,
					s64 sectors)
{
	struct replicas_delta_list *d;
	struct replicas_delta *n;
	unsigned b;

	if (!sectors)
		return;

	b = replicas_entry_bytes(r) + 8;
	d = replicas_deltas_realloc(trans, b);

	n = (void *) d->d + d->used;
	n->delta = sectors;
	memcpy(&n->r, r, replicas_entry_bytes(r));
	d->used += b;
}

static inline void update_cached_sectors_list(struct btree_trans *trans,
					      unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas_list(trans, &r.e, sectors);
}

static inline struct replicas_delta *
replicas_delta_next(struct replicas_delta *d)
{
	return (void *) d + replicas_entry_bytes(&d->r) + 8;
}
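
/*
 * Delta list layout (as used by update_replicas_list() and
 * replicas_delta_next() above): entries are packed back to back, each an
 * 8 byte delta followed by a variable length bch_replicas_entry, which is
 * why both functions advance by replicas_entry_bytes() + 8:
 *
 *   | delta | r (variable) | delta | r (variable) | ...
 *   ^ d->d                                        ^ d->d + d->used
 */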
int bch2_replicas_delta_list_apply(struct bch_fs *c,
				   struct bch_fs_usage *fs_usage,
				   struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	unsigned i;

	for (d = r->d; d != top; d = replicas_delta_next(d))
		if (update_replicas(c, fs_usage, &d->r, d->delta)) {
			top = d;
			goto unwind;
		}

	if (!fs_usage)
		return 0;

	fs_usage->nr_inodes += r->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		fs_usage->reserved += r->persistent_reserved[i];
		fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
	}

	return 0;
unwind:
	for (d = r->d; d != top; d = replicas_delta_next(d))
		update_replicas(c, fs_usage, &d->r, -d->delta);
	return -1;
}
#define do_mark_fn(fn, c, pos, flags, ...)				\
({									\
	int gc, ret = 0;						\
									\
	percpu_rwsem_assert_held(&c->mark_lock);			\
									\
	for (gc = 0; gc < 2 && !ret; gc++)				\
		if (!gc == !(flags & BTREE_TRIGGER_GC) ||		\
		    (gc && gc_visited(c, pos)))				\
			ret = fn(c, __VA_ARGS__, gc);			\
									\
	ret;								\
})
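
/*
 * The loop above runs the marking function against the primary copies
 * (gc == 0) for normal update paths, and against gc's copies (gc == 1)
 * when called from gc itself or, for normal updates, when a running gc
 * has already visited this position - keeping gc's view consistent with
 * updates that race with the mark-and-sweep pass.
 */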
static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, struct bucket_mark *ret,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		BUG_ON(!is_available_bucket(new));

		new.owned_by_allocator	= true;
		new.data_type		= 0;
		new.cached_sectors	= 0;
		new.dirty_sectors	= 0;
		new.gen++;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	if (old.cached_sectors)
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -((s64) old.cached_sectors));

	if (!gc)
		*ret = old;

	return 0;
}

void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, struct bucket_mark *old)
{
	do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
		   ca, b, old);

	if (!old->owned_by_allocator && old->cached_sectors)
		trace_invalidate(ca, bucket_to_sector(ca, b),
				 old->cached_sectors);
}
static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, bool owned_by_allocator,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		new.owned_by_allocator = owned_by_allocator;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	BUG_ON(!gc &&
	       !owned_by_allocator && !old.owned_by_allocator);

	return 0;
}

void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, bool owned_by_allocator,
			    struct gc_pos pos, unsigned flags)
{
	preempt_disable();

	do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
		   ca, b, owned_by_allocator);

	preempt_enable();
}
static int bch2_mark_alloc(struct bch_fs *c,
			   struct bkey_s_c old, struct bkey_s_c new,
			   struct bch_fs_usage *fs_usage,
			   u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bkey_alloc_unpacked u;
	struct bch_dev *ca;
	struct bucket *g;
	struct bucket_mark old_m, m;

	/* We don't do anything for deletions - do we?: */
	if (new.k->type != KEY_TYPE_alloc)
		return 0;

	/*
	 * alloc btree is read in by bch2_alloc_read, not gc:
	 */
	if ((flags & BTREE_TRIGGER_GC) &&
	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
		return 0;

	ca = bch_dev_bkey_exists(c, new.k->p.inode);

	if (new.k->p.offset >= ca->mi.nbuckets)
		return 0;

	g = __bucket(ca, new.k->p.offset, gc);
	u = bch2_alloc_unpack(new);

	old_m = bucket_cmpxchg(g, m, ({
		m.gen		= u.gen;
		m.data_type	= u.data_type;
		m.dirty_sectors	= u.dirty_sectors;
		m.cached_sectors = u.cached_sectors;

		if (journal_seq) {
			m.journal_seq_valid = 1;
			m.journal_seq = journal_seq;
		}
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);

	g->io_time[READ]	= u.read_time;
	g->io_time[WRITE]	= u.write_time;
	g->oldest_gen		= u.oldest_gen;
	g->gen_valid		= 1;

	/*
	 * need to know if we're getting called from the invalidate path or
	 * not:
	 */
	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
	    old_m.cached_sectors) {
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -old_m.cached_sectors);
		trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
				 old_m.cached_sectors);
	}

	return 0;
}
#define checked_add(a, b)					\
({								\
	unsigned _res = (unsigned) (a) + (b);			\
	bool overflow = _res > U16_MAX;				\
								\
	if (overflow)						\
		_res = U16_MAX;					\
	(a) = _res;						\
	overflow;						\
})

static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t b, enum bch_data_type data_type,
				       unsigned sectors, bool gc)
{
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;
	bool overflow;

	BUG_ON(data_type != BCH_DATA_sb &&
	       data_type != BCH_DATA_journal);

	old = bucket_cmpxchg(g, new, ({
		new.data_type	= data_type;
		overflow	= checked_add(new.dirty_sectors, sectors);
	}));

	bch2_fs_inconsistent_on(old.data_type &&
				old.data_type != data_type, c,
		"different types of data in same bucket: %s, %s",
		bch2_data_types[old.data_type],
		bch2_data_types[data_type]);

	bch2_fs_inconsistent_on(overflow, c,
		"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
		ca->dev_idx, b, new.gen,
		bch2_data_types[old.data_type ?: data_type],
		old.dirty_sectors, sectors);

	if (c)
		bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
				      old, new, gc);

	return 0;
}
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			       size_t b, enum bch_data_type type,
			       unsigned sectors, struct gc_pos pos,
			       unsigned flags)
{
	BUG_ON(type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	preempt_disable();

	if (likely(c))
		do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
			   ca, b, type, sectors);
	else
		__bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);

	preempt_enable();
}
static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
{
	return DIV_ROUND_UP(sectors * n, d);
}

static s64 __ptr_disk_sectors_delta(unsigned old_size,
				    unsigned offset, s64 delta,
				    unsigned flags,
				    unsigned n, unsigned d)
{
	BUG_ON(!n || !d);

	if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
		BUG_ON(offset + -delta > old_size);

		return -disk_sectors_scaled(n, d, old_size) +
			disk_sectors_scaled(n, d, offset) +
			disk_sectors_scaled(n, d, old_size - offset + delta);
	} else if (flags & BTREE_TRIGGER_OVERWRITE) {
		BUG_ON(offset + -delta > old_size);

		return -disk_sectors_scaled(n, d, old_size) +
			disk_sectors_scaled(n, d, old_size + delta);
	} else {
		return disk_sectors_scaled(n, d, delta);
	}
}

static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
				  unsigned offset, s64 delta,
				  unsigned flags)
{
	return __ptr_disk_sectors_delta(p.crc.live_size,
					offset, delta, flags,
					p.crc.compressed_size,
					p.crc.uncompressed_size);
}
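
/*
 * Worked example (illustrative values): for a pointer with
 * crc.live_size == 8, crc.compressed_size == 4, crc.uncompressed_size == 8,
 * each live sector costs half a disk sector, rounded up per call. An
 * overwrite of the last 2 sectors (BTREE_TRIGGER_OVERWRITE, delta == -2)
 * gives -disk_sectors_scaled(4, 8, 8) + disk_sectors_scaled(4, 8, 6)
 * = -4 + 3 = -1 disk sector.
 */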
static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
			    const struct bch_extent_ptr *ptr,
			    s64 sectors, enum bch_data_type ptr_data_type,
			    u8 bucket_gen, u8 bucket_data_type,
			    u16 dirty_sectors, u16 cached_sectors)
{
	size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
	u16 bucket_sectors = !ptr->cached
		? dirty_sectors
		: cached_sectors;
	char buf[200];

	if (gen_after(ptr->gen, bucket_gen)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != ptr->gen && !ptr->cached) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != ptr->gen)
		return 1;

	if (bucket_data_type && ptr_data_type &&
	    bucket_data_type != ptr_data_type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type],
			bch2_data_types[ptr_data_type],
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			bucket_sectors, sectors,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	return 0;
}
static int bucket_set_stripe(struct bch_fs *c, struct bkey_s_c k,
			     unsigned ptr_idx,
			     struct bch_fs_usage *fs_usage,
			     u64 journal_seq, unsigned flags,
			     bool enabled)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned nr_data = s->nr_blocks - s->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	struct bucket *g = PTR_BUCKET(ca, ptr, gc);
	struct bucket_mark new, old;
	char buf[200];
	int ret = 0;

	if (enabled)
		g->ec_redundancy = s->nr_redundant;

	old = bucket_cmpxchg(g, new, ({
		ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
				       new.dirty_sectors, new.cached_sectors);
		if (ret)
			return ret;

		if (new.stripe && enabled)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				"bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
				ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
				(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));

		if (!new.stripe && !enabled)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				"bucket %u:%zu gen %u: deleting stripe but not marked\n%s",
				ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
				(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));

		new.stripe = enabled;

		if ((flags & BTREE_TRIGGER_GC) && parity) {
			new.data_type = enabled ? BCH_DATA_parity : 0;
			new.dirty_sectors = enabled ? le16_to_cpu(s->sectors) : 0;
		}

		if (journal_seq) {
			new.journal_seq_valid = 1;
			new.journal_seq = journal_seq;
		}
	}));

	if (!enabled)
		g->ec_redundancy = 0;

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
	return 0;
}
static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			  const struct bch_extent_ptr *ptr,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 bucket_gen, u8 *bucket_data_type,
			  u16 *dirty_sectors, u16 *cached_sectors)
{
	u16 *dst_sectors = !ptr->cached
		? dirty_sectors
		: cached_sectors;
	int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
				   bucket_gen, *bucket_data_type,
				   *dirty_sectors, *cached_sectors);

	if (ret)
		return ret;

	*dst_sectors += sectors;
	*bucket_data_type = *dirty_sectors || *cached_sectors
		? ptr_data_type : 0;
	return 0;
}
static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			     struct extent_ptr_decoded p,
			     s64 sectors, enum bch_data_type data_type,
			     struct bch_fs_usage *fs_usage,
			     u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bucket_mark old, new;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
	u8 bucket_data_type;
	u64 v;
	int ret;

	v = atomic64_read(&g->_mark.v);
	do {
		new.v.counter = old.v.counter = v;
		bucket_data_type = new.data_type;

		ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
				     &bucket_data_type,
				     &new.dirty_sectors,
				     &new.cached_sectors);
		if (ret)
			return ret;

		new.data_type = bucket_data_type;

		if (journal_seq) {
			new.journal_seq_valid = 1;
			new.journal_seq = journal_seq;
		}

		if (flags & BTREE_TRIGGER_NOATOMIC) {
			g->_mark = new;
			break;
		}
	} while ((v = atomic64_cmpxchg(&g->_mark.v,
				       old.v.counter,
				       new.v.counter)) != old.v.counter);

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	BUG_ON(!gc && bucket_became_unavailable(old, new));

	return 0;
}
static int bch2_mark_stripe_ptr(struct bch_fs *c,
				struct bch_extent_stripe_ptr p,
				enum bch_data_type data_type,
				struct bch_fs_usage *fs_usage,
				s64 sectors, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bch_replicas_padded r;
	struct stripe *m;
	unsigned i, blocks_nonempty = 0;

	m = genradix_ptr(&c->stripes[gc], p.idx);

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || !m->alive) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
				    (u64) p.idx);
		return -EIO;
	}

	m->block_sectors[p.block] += sectors;

	r = m->r;

	for (i = 0; i < m->nr_blocks; i++)
		blocks_nonempty += m->block_sectors[i] != 0;

	if (m->blocks_nonempty != blocks_nonempty) {
		m->blocks_nonempty = blocks_nonempty;
		if (!gc)
			bch2_stripes_heap_update(c, m, p.idx);
	}

	spin_unlock(&c->ec_stripes_heap_lock);

	r.e.data_type = data_type;
	update_replicas(c, fs_usage, &r.e, sectors);

	return 0;
}
static int bch2_mark_extent(struct bch_fs *c,
			    struct bkey_s_c old, struct bkey_s_c new,
			    unsigned offset, s64 sectors,
			    enum bch_data_type data_type,
			    struct bch_fs_usage *fs_usage,
			    unsigned journal_seq, unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	BUG_ON(!sectors);

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = data_type == BCH_DATA_btree
			? sectors
			: ptr_disk_sectors_delta(p, offset, sectors, flags);

		ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
					fs_usage, journal_seq, flags);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors(c, fs_usage, p.ptr.dev,
						      disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors += disk_sectors;
			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
		} else {
			ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
						   fs_usage, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas(c, fs_usage, &r.e, dirty_sectors);

	return 0;
}
static int bch2_mark_stripe(struct bch_fs *c,
			    struct bkey_s_c old, struct bkey_s_c new,
			    struct bch_fs_usage *fs_usage,
			    u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	size_t idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;
	struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
	unsigned i;
	int ret;

	if (!m || (old_s && !m->alive)) {
		bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
				    idx);
		return -1;
	}

	if (!new_s) {
		/* Deleting: */
		for (i = 0; i < old_s->nr_blocks; i++) {
			ret = bucket_set_stripe(c, old, i, fs_usage,
						journal_seq, flags, false);
			if (ret)
				return ret;
		}

		if (!gc && m->on_heap) {
			spin_lock(&c->ec_stripes_heap_lock);
			bch2_stripes_heap_del(c, m, idx);
			spin_unlock(&c->ec_stripes_heap_lock);
		}

		if (gc)
			update_replicas(c, fs_usage, &m->r.e,
					-((s64) m->sectors * m->nr_redundant));

		memset(m, 0, sizeof(*m));
	} else {
		BUG_ON(old_s && new_s->nr_blocks != old_s->nr_blocks);
		BUG_ON(old_s && new_s->nr_redundant != old_s->nr_redundant);

		for (i = 0; i < new_s->nr_blocks; i++) {
			if (!old_s ||
			    memcmp(new_s->ptrs + i,
				   old_s->ptrs + i,
				   sizeof(struct bch_extent_ptr))) {
				if (old_s) {
					ret = bucket_set_stripe(c, old, i, fs_usage,
								journal_seq, flags, false);
					if (ret)
						return ret;
				}
				ret = bucket_set_stripe(c, new, i, fs_usage,
							journal_seq, flags, true);
				if (ret)
					return ret;
			}
		}

		m->alive	= true;
		m->sectors	= le16_to_cpu(new_s->sectors);
		m->algorithm	= new_s->algorithm;
		m->nr_blocks	= new_s->nr_blocks;
		m->nr_redundant	= new_s->nr_redundant;
		m->blocks_nonempty = 0;

		for (i = 0; i < new_s->nr_blocks; i++) {
			m->block_sectors[i] =
				stripe_blockcount_get(new_s, i);
			m->blocks_nonempty += !!m->block_sectors[i];
		}

		if (gc && old_s)
			update_replicas(c, fs_usage, &m->r.e,
					-((s64) m->sectors * m->nr_redundant));

		bch2_bkey_to_replicas(&m->r.e, new);

		if (gc)
			update_replicas(c, fs_usage, &m->r.e,
					((s64) m->sectors * m->nr_redundant));

		if (!gc) {
			spin_lock(&c->ec_stripes_heap_lock);
			bch2_stripes_heap_update(c, m, idx);
			spin_unlock(&c->ec_stripes_heap_lock);
		}
	}

	return 0;
}
static int bch2_mark_key_locked(struct bch_fs *c,
		   struct bkey_s_c old,
		   struct bkey_s_c new,
		   unsigned offset, s64 sectors,
		   struct bch_fs_usage *fs_usage,
		   u64 journal_seq, unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
	int ret = 0;

	BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));

	preempt_disable();

	if (!fs_usage || (flags & BTREE_TRIGGER_GC))
		fs_usage = fs_usage_ptr(c, journal_seq,
					flags & BTREE_TRIGGER_GC);

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
			?  c->opts.btree_node_size
			: -c->opts.btree_node_size;

		ret = bch2_mark_extent(c, old, new, offset, sectors,
				BCH_DATA_btree, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		ret = bch2_mark_extent(c, old, new, offset, sectors,
				BCH_DATA_user, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_stripe:
		ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_inode:
		if (!(flags & BTREE_TRIGGER_OVERWRITE))
			fs_usage->nr_inodes++;
		else
			fs_usage->nr_inodes--;
		break;
	case KEY_TYPE_reservation: {
		unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;

		sectors *= replicas;
		replicas = clamp_t(unsigned, replicas, 1,
				   ARRAY_SIZE(fs_usage->persistent_reserved));

		fs_usage->reserved += sectors;
		fs_usage->persistent_reserved[replicas - 1] += sectors;
		break;
	}
	}

	preempt_enable();

	return ret;
}
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
		  unsigned offset, s64 sectors,
		  struct bch_fs_usage *fs_usage,
		  u64 journal_seq, unsigned flags)
{
	struct bkey deleted;
	struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
	int ret;

	bkey_init(&deleted);

	percpu_down_read(&c->mark_lock);
	ret = bch2_mark_key_locked(c, old, new, offset, sectors,
				   fs_usage, journal_seq,
				   BTREE_TRIGGER_INSERT|flags);
	percpu_up_read(&c->mark_lock);

	return ret;
}
int bch2_mark_update(struct btree_trans *trans,
		     struct btree_iter *iter,
		     struct bkey_i *new,
		     struct bch_fs_usage *fs_usage,
		     unsigned flags)
{
	struct bch_fs		*c = trans->c;
	struct btree		*b = iter_l(iter)->b;
	struct btree_node_iter	node_iter = iter_l(iter)->iter;
	struct bkey_packed	*_old;
	struct bkey_s_c		old;
	struct bkey		unpacked;
	int ret = 0;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(iter->btree_id))
		return 0;

	bkey_init(&unpacked);
	old = (struct bkey_s_c) { &unpacked, NULL };

	if (!btree_node_type_is_extents(iter->btree_id)) {
		if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
			_old = bch2_btree_node_iter_peek(&node_iter, b);
			if (_old)
				old = bkey_disassemble(b, _old, &unpacked);
		} else {
			struct bkey_cached *ck = (void *) iter->l[0].b;

			if (ck->valid)
				old = bkey_i_to_s_c(ck->k);
		}

		if (old.k->type == new->k.type) {
			bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
				fs_usage, trans->journal_res.seq,
				BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
		} else {
			bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
				fs_usage, trans->journal_res.seq,
				BTREE_TRIGGER_INSERT|flags);
			bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
				fs_usage, trans->journal_res.seq,
				BTREE_TRIGGER_OVERWRITE|flags);
		}
	} else {
		BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
		bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
			0, new->k.size,
			fs_usage, trans->journal_res.seq,
			BTREE_TRIGGER_INSERT|flags);

		while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
			unsigned offset = 0;
			s64 sectors;

			old = bkey_disassemble(b, _old, &unpacked);
			sectors = -((s64) old.k->size);

			flags |= BTREE_TRIGGER_OVERWRITE;

			if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
				return 0;

			switch (bch2_extent_overlap(&new->k, old.k)) {
			case BCH_EXTENT_OVERLAP_ALL:
				offset = 0;
				sectors = -((s64) old.k->size);
				break;
			case BCH_EXTENT_OVERLAP_BACK:
				offset = bkey_start_offset(&new->k) -
					bkey_start_offset(old.k);
				sectors = bkey_start_offset(&new->k) -
					old.k->p.offset;
				break;
			case BCH_EXTENT_OVERLAP_FRONT:
				offset = 0;
				sectors = bkey_start_offset(old.k) -
					new->k.p.offset;
				break;
			case BCH_EXTENT_OVERLAP_MIDDLE:
				offset = bkey_start_offset(&new->k) -
					bkey_start_offset(old.k);
				sectors = -((s64) new->k.size);
				flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
				break;
			}

			BUG_ON(sectors >= 0);

			ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
					offset, sectors, fs_usage,
					trans->journal_res.seq, flags) ?: 1;
			if (ret <= 0)
				break;

			bch2_btree_node_iter_advance(&node_iter, b);
		}
	}

	return ret;
}
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
			       struct bch_fs_usage *fs_usage)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	static int warned_disk_usage = 0;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	char buf[200];

	if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
				 trans->journal_res.seq) ||
	    warned_disk_usage ||
	    xchg(&warned_disk_usage, 1))
		return;

	bch_err(c, "disk usage increased more than %llu sectors reserved",
		disk_res_sectors);

	trans_for_each_update(trans, i) {
		pr_err("while inserting");
		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
		pr_err("%s", buf);
		pr_err("overlapping with");

		if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
			struct btree *b = iter_l(i->iter)->b;
			struct btree_node_iter node_iter = iter_l(i->iter)->iter;
			struct bkey_packed *_k;

			while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
				struct bkey unpacked;
				struct bkey_s_c k;

				pr_info("_k %px format %u", _k, _k->format);
				k = bkey_disassemble(b, _k, &unpacked);

				if (btree_node_is_extents(b)
				    ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
				    : bkey_cmp(i->k->k.p, k.k->p))
					break;

				bch2_bkey_val_to_text(&PBUF(buf), c, k);
				pr_err("%s", buf);

				bch2_btree_node_iter_advance(&node_iter, b);
			}
		} else {
			struct bkey_cached *ck = (void *) i->iter->l[0].b;

			if (ck->valid) {
				bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
				pr_err("%s", buf);
			}
		}
	}
}
static struct btree_iter *trans_get_update(struct btree_trans *trans,
			    enum btree_id btree_id, struct bpos pos,
			    struct bkey_s_c *k)
{
	struct btree_insert_entry *i;

	trans_for_each_update(trans, i)
		if (i->iter->btree_id == btree_id &&
		    (btree_node_type_is_extents(btree_id)
		     ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
		       bkey_cmp(pos, i->k->k.p) < 0
		     : !bkey_cmp(pos, i->iter->pos))) {
			*k = bkey_i_to_s_c(i->k);
			return i->iter;
		}

	return NULL;
}

static int trans_get_key(struct btree_trans *trans,
			 enum btree_id btree_id, struct bpos pos,
			 struct btree_iter **iter,
			 struct bkey_s_c *k)
{
	unsigned flags = btree_id != BTREE_ID_ALLOC
		? BTREE_ITER_SLOTS
		: BTREE_ITER_CACHED;
	int ret;

	*iter = trans_get_update(trans, btree_id, pos, k);
	if (*iter)
		return 1;

	*iter = bch2_trans_get_iter(trans, btree_id, pos,
				    flags|BTREE_ITER_INTENT);
	if (IS_ERR(*iter))
		return PTR_ERR(*iter);

	*k = __bch2_btree_iter_peek(*iter, flags);
	ret = bkey_err(*k);
	if (ret)
		bch2_trans_iter_put(trans, *iter);
	return ret;
}
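
/*
 * trans_get_key() checks the transaction's own pending updates (via
 * trans_get_update()) before reading from the btree, so a key modified
 * earlier in the same transaction is seen in its updated form; only on a
 * miss does it fall back to a real btree lookup.
 */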
static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
					 const struct bch_extent_ptr *ptr,
					 struct bkey_alloc_unpacked *u)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
	struct bucket *g;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k);
	if (iter) {
		*u = bch2_alloc_unpack(k);
	} else {
		iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
					   BTREE_ITER_CACHED|
					   BTREE_ITER_CACHED_NOFILL|
					   BTREE_ITER_INTENT);
		if (IS_ERR(iter))
			return PTR_ERR(iter);

		ret = bch2_btree_iter_traverse(iter);
		if (ret) {
			bch2_trans_iter_put(trans, iter);
			return ret;
		}

		percpu_down_read(&c->mark_lock);
		g = bucket(ca, pos.offset);
		*u = alloc_mem_to_key(g, READ_ONCE(g->mark));
		percpu_up_read(&c->mark_lock);
	}

	*_iter = iter;
	return 0;
}
static int bch2_trans_mark_pointer(struct btree_trans *trans,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_alloc_unpacked u;
	struct bkey_i_alloc *a;
	int ret;

	ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
	if (ret)
		return ret;

	ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
			     &u.dirty_sectors, &u.cached_sectors);
	if (ret)
		goto out;

	a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	bkey_alloc_init(&a->k_i);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);
	bch2_trans_update(trans, iter, &a->k_i, 0);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
			struct bch_extent_stripe_ptr p,
			s64 sectors, enum bch_data_type data_type)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_stripe *s;
	struct bch_replicas_padded r;
	int ret = 0;

	ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
	if (ret < 0)
		return ret;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c,
			"pointer to nonexistent stripe %llu",
			(u64) p.idx);
		ret = -EIO;
		goto out;
	}

	s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		goto out;

	bkey_reassemble(&s->k_i, k);
	stripe_blockcount_set(&s->v, p.block,
		stripe_blockcount_get(&s->v, p.block) +
		sectors);
	bch2_trans_update(trans, iter, &s->k_i, 0);

	bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
	r.e.data_type = data_type;
	update_replicas_list(trans, &r.e, sectors);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
static int bch2_trans_mark_extent(struct btree_trans *trans,
			struct bkey_s_c k, unsigned offset,
			s64 sectors, unsigned flags,
			enum bch_data_type data_type)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	BUG_ON(!sectors);

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = data_type == BCH_DATA_btree
			? sectors
			: ptr_disk_sectors_delta(p, offset, sectors, flags);

		ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
					      data_type);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors_list(trans, p.ptr.dev,
							   disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors += disk_sectors;
			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
		} else {
			ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
					disk_sectors, data_type);
			if (ret)
				return ret;

			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas_list(trans, &r.e, dirty_sectors);

	return 0;
}
static int bch2_trans_mark_stripe(struct btree_trans *trans,
				  struct bkey_s_c k,
				  unsigned flags)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned nr_data = s->nr_blocks - s->nr_redundant;
	struct bch_replicas_padded r;
	struct bkey_alloc_unpacked u;
	struct bkey_i_alloc *a;
	struct btree_iter *iter;
	bool deleting = flags & BTREE_TRIGGER_OVERWRITE;
	s64 sectors = le16_to_cpu(s->sectors);
	unsigned i;
	int ret = 0;

	if (deleting)
		sectors = -sectors;

	bch2_bkey_to_replicas(&r.e, k);
	update_replicas_list(trans, &r.e, sectors * s->nr_redundant);

	/*
	 * The allocator code doesn't necessarily update bucket gens in the
	 * btree when incrementing them, right before handing out new buckets -
	 * we just need to persist those updates here along with the new stripe:
	 */

	for (i = 0; i < s->nr_blocks && !ret; i++) {
		bool parity = i >= nr_data;

		ret = bch2_trans_start_alloc_update(trans, &iter,
						    &s->ptrs[i], &u);
		if (ret)
			break;

		if (parity) {
			u.dirty_sectors += sectors;
			u.data_type = u.dirty_sectors
				? BCH_DATA_parity
				: 0;
		}

		a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
		ret = PTR_ERR_OR_ZERO(a);
		if (ret)
			goto put_iter;

		bkey_alloc_init(&a->k_i);
		a->k.p = iter->pos;
		bch2_alloc_pack(a, u);
		bch2_trans_update(trans, iter, &a->k_i, 0);
put_iter:
		bch2_trans_iter_put(trans, iter);
	}

	return ret;
}
static __le64 *bkey_refcount(struct bkey_i *k)
{
	switch (k->k.type) {
	case KEY_TYPE_reflink_v:
		return &bkey_i_to_reflink_v(k)->v.refcount;
	case KEY_TYPE_indirect_inline_data:
		return &bkey_i_to_indirect_inline_data(k)->v.refcount;
	default:
		return NULL;
	}
}
static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 idx, unsigned sectors,
			unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i *n;
	__le64 *refcount;
	s64 ret;

	ret = trans_get_key(trans, BTREE_ID_REFLINK,
			    POS(0, idx), &iter, &k);
	if (ret < 0)
		return ret;

	if ((flags & BTREE_TRIGGER_OVERWRITE) &&
	    (bkey_start_offset(k.k) < idx ||
	     k.k->p.offset > idx + sectors))
		goto out;

	sectors = k.k->p.offset - idx;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto err;

	bkey_reassemble(n, k);

	refcount = bkey_refcount(n);
	if (!refcount) {
		bch2_fs_inconsistent(c,
			"%llu:%llu len %u points to nonexistent indirect extent %llu",
			p.k->p.inode, p.k->p.offset, p.k->size, idx);
		ret = -EIO;
		goto err;
	}

	le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);

	if (!*refcount) {
		n->k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&n->k, 0);
	}

	bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
	BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);

	bch2_trans_update(trans, iter, n, 0);
out:
	ret = sectors;
err:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p, unsigned offset,
			s64 sectors, unsigned flags)
{
	u64 idx = le64_to_cpu(p.v->idx) + offset;
	s64 ret = 0;

	sectors = abs(sectors);
	BUG_ON(offset + sectors > p.k->size);

	while (sectors) {
		ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
		if (ret < 0)
			break;

		idx += ret;
		sectors = max_t(s64, 0LL, sectors - ret);
	}

	return ret;
}
int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
			unsigned offset, s64 sectors, unsigned flags)
{
	struct replicas_delta_list *d;
	struct bch_fs *c = trans->c;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
			?  c->opts.btree_node_size
			: -c->opts.btree_node_size;

		return bch2_trans_mark_extent(trans, k, offset, sectors,
					      flags, BCH_DATA_btree);
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return bch2_trans_mark_extent(trans, k, offset, sectors,
					      flags, BCH_DATA_user);
	case KEY_TYPE_stripe:
		return bch2_trans_mark_stripe(trans, k, flags);
	case KEY_TYPE_inode:
		d = replicas_deltas_realloc(trans, 0);

		if (!(flags & BTREE_TRIGGER_OVERWRITE))
			d->nr_inodes++;
		else
			d->nr_inodes--;
		return 0;
	case KEY_TYPE_reservation: {
		unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;

		d = replicas_deltas_realloc(trans, 0);

		sectors *= replicas;
		replicas = clamp_t(unsigned, replicas, 1,
				   ARRAY_SIZE(d->persistent_reserved));

		d->persistent_reserved[replicas - 1] += sectors;
		return 0;
	}
	case KEY_TYPE_reflink_p:
		return bch2_trans_mark_reflink_p(trans,
					bkey_s_c_to_reflink_p(k),
					offset, sectors, flags);
	default:
		return 0;
	}
}
int bch2_trans_mark_update(struct btree_trans *trans,
			   struct btree_iter *iter,
			   struct bkey_i *insert,
			   unsigned flags)
{
	struct btree		*b = iter_l(iter)->b;
	struct btree_node_iter	node_iter = iter_l(iter)->iter;
	struct bkey_packed	*_k;
	int ret;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(iter->btree_id))
		return 0;

	ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
			0, insert->k.size, BTREE_TRIGGER_INSERT);
	if (ret)
		return ret;

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		struct bkey_cached *ck = (void *) iter->l[0].b;

		return bch2_trans_mark_key(trans, bkey_i_to_s_c(ck->k),
					   0, 0, BTREE_TRIGGER_OVERWRITE);
	}

	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
		struct bkey		unpacked;
		struct bkey_s_c		k;
		unsigned		offset = 0;
		s64			sectors = 0;
		unsigned		flags = BTREE_TRIGGER_OVERWRITE;

		k = bkey_disassemble(b, _k, &unpacked);

		if (btree_node_is_extents(b)
		    ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
		    : bkey_cmp(insert->k.p, k.k->p))
			break;

		if (btree_node_is_extents(b)) {
			switch (bch2_extent_overlap(&insert->k, k.k)) {
			case BCH_EXTENT_OVERLAP_ALL:
				offset = 0;
				sectors = -((s64) k.k->size);
				break;
			case BCH_EXTENT_OVERLAP_BACK:
				offset = bkey_start_offset(&insert->k) -
					bkey_start_offset(k.k);
				sectors = bkey_start_offset(&insert->k) -
					k.k->p.offset;
				break;
			case BCH_EXTENT_OVERLAP_FRONT:
				offset = 0;
				sectors = bkey_start_offset(k.k) -
					insert->k.p.offset;
				break;
			case BCH_EXTENT_OVERLAP_MIDDLE:
				offset = bkey_start_offset(&insert->k) -
					bkey_start_offset(k.k);
				sectors = -((s64) insert->k.size);
				flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
				break;
			}

			BUG_ON(sectors >= 0);
		}

		ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
		if (ret)
			return ret;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return 0;
}
/* Disk reservations: */
static u64 bch2_recalc_sectors_available(struct bch_fs *c)
{
	percpu_u64_set(&c->pcpu->sectors_available, 0);

	return avail_factor(__bch2_fs_usage_read_short(c).free);
}

void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
	percpu_down_read(&c->mark_lock);
	this_cpu_sub(c->usage[0]->online_reserved,
		     res->sectors);
	percpu_up_read(&c->mark_lock);

	res->sectors = 0;
}
#define SECTORS_CACHE	1024
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
			      unsigned sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, v, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	v = atomic64_read(&c->sectors_available);
	do {
		old = v;
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			percpu_up_read(&c->mark_lock);
			goto recalculate;
		}
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available += get;

out:
	pcpu->sectors_available -= sectors;
	this_cpu_add(c->usage[0]->online_reserved, sectors);
	res->sectors += sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	percpu_down_write(&c->mark_lock);

	sectors_available = bch2_recalc_sectors_available(c);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(c->usage[0]->online_reserved, sectors);
		res->sectors += sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -ENOSPC;
	}

	percpu_up_write(&c->mark_lock);

	return ret;
}
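
/*
 * Fast path example: with SECTORS_CACHE == 1024, a 16 sector reservation
 * that misses the percpu cache pulls get = min(16 + 1024, old) = 1040
 * sectors from c->sectors_available in one atomic op, consumes 16, and
 * leaves 1024 cached in pcpu->sectors_available so subsequent small
 * reservations on this cpu avoid the shared atomic entirely.
 */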
/* Startup/shutdown: */
static void buckets_free_rcu(struct rcu_head *rcu)
{
	struct bucket_array *buckets =
		container_of(rcu, struct bucket_array, rcu);

	kvpfree(buckets,
		sizeof(struct bucket_array) +
		buckets->nbuckets * sizeof(struct bucket));
}
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_array *buckets = NULL, *old_buckets = NULL;
	unsigned long *buckets_nouse = NULL;
	alloc_fifo	free[RESERVE_NR];
	alloc_fifo	free_inc;
	alloc_heap	alloc_heap;

	size_t btree_reserve	= DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / c->opts.btree_node_size);
	/* XXX: these should be tunable */
	size_t reserve_none	= max_t(size_t, 1, nbuckets >> 9);
	size_t copygc_reserve	= max_t(size_t, 2, nbuckets >> 7);
	size_t free_inc_nr	= max(max_t(size_t, 1, nbuckets >> 12),
				      btree_reserve * 2);
	bool resize = ca->buckets[0] != NULL;
	int ret = -ENOMEM;
	unsigned i;

	memset(&free,		0, sizeof(free));
	memset(&free_inc,	0, sizeof(free_inc));
	memset(&alloc_heap,	0, sizeof(alloc_heap));

	if (!(buckets		= kvpmalloc(sizeof(struct bucket_array) +
					    nbuckets * sizeof(struct bucket),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
	    !init_fifo(&free[RESERVE_MOVINGGC],
		       copygc_reserve, GFP_KERNEL) ||
	    !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
	    !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
	    !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
		goto err;

	buckets->first_bucket	= ca->mi.first_bucket;
	buckets->nbuckets	= nbuckets;

	bch2_copygc_stop(c);

	if (resize) {
		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_buckets = bucket_array(ca);

	if (resize) {
		size_t n = min(buckets->nbuckets, old_buckets->nbuckets);

		memcpy(buckets->b,
		       old_buckets->b,
		       n * sizeof(struct bucket));
		memcpy(buckets_nouse,
		       ca->buckets_nouse,
		       BITS_TO_LONGS(n) * sizeof(unsigned long));
	}

	rcu_assign_pointer(ca->buckets[0], buckets);
	buckets = old_buckets;

	swap(ca->buckets_nouse, buckets_nouse);

	if (resize) {
		percpu_up_write(&c->mark_lock);
		up_write(&c->gc_lock);
	}

	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++) {
		fifo_move(&free[i], &ca->free[i]);
		swap(ca->free[i], free[i]);
	}
	fifo_move(&free_inc, &ca->free_inc);
	swap(ca->free_inc, free_inc);
	spin_unlock(&c->freelist_lock);

	/* with gc lock held, alloc_heap can't be in use: */
	swap(ca->alloc_heap, alloc_heap);

	nbuckets = ca->mi.nbuckets;

	if (resize)
		up_write(&ca->bucket_lock);

	ret = 0;
err:
	free_heap(&alloc_heap);
	free_fifo(&free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&free[i]);
	kvpfree(buckets_nouse,
		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
	if (buckets)
		call_rcu(&old_buckets->rcu, buckets_free_rcu);

	return ret;
}
void bch2_dev_buckets_free(struct bch_dev *ca)
{
	unsigned i;

	free_heap(&ca->alloc_heap);
	free_fifo(&ca->free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);
	kvpfree(ca->buckets_nouse,
		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
	kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
		sizeof(struct bucket_array) +
		ca->mi.nbuckets * sizeof(struct bucket));

	free_percpu(ca->usage[0]);
}
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
		return -ENOMEM;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}