// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 *
 * Bucket states:
 * - free bucket: mark == 0
 *   The bucket contains no data and will not be read
 *
 * - allocator bucket: owned_by_allocator == 1
 *   The bucket is on a free list, or it is an open bucket
 *
 * - cached bucket: owned_by_allocator == 0 &&
 *   dirty_sectors == 0 &&
 *   cached_sectors > 0
 *   The bucket contains data but may be safely discarded as there are
 *   enough replicas of the data on other cache devices, or it has been
 *   written back to the backing device
 *
 * - dirty bucket: owned_by_allocator == 0 &&
 *   dirty_sectors > 0
 *   The bucket contains data that we must not discard (either the only copy,
 *   or one of the 'main copies' for data requiring multiple replicas)
 *
 * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
 *   This is a btree node, journal or gen/prio bucket
 *
 * Lifecycle:
 *
 * bucket invalidated => bucket on freelist => open bucket =>
 *     [dirty bucket =>] cached bucket => bucket invalidated => ...
 *
 * Note that cache promotion can skip the dirty bucket step, as data
 * is copied from a deeper tier to a shallower tier, onto a cached
 * bucket.
 * Note also that a cached bucket can spontaneously become dirty --
 * see below.
 *
 * Only a traversal of the key space can determine whether a bucket is
 * truly dirty or cached.
 *
 * Transitions:
 *
 * - free => allocator: bucket was invalidated
 * - cached => allocator: bucket was invalidated
 *
 * - allocator => dirty: open bucket was filled up
 * - allocator => cached: open bucket was filled up
 * - allocator => metadata: metadata was allocated
 *
 * - dirty => cached: dirty sectors were copied to a deeper tier
 * - dirty => free: dirty sectors were overwritten or moved (copy gc)
 * - cached => free: cached sectors were overwritten
 *
 * - metadata => free: metadata was freed
 *
 * Oddities:
 * - cached => dirty: a device was removed so formerly replicated data
 *                    is no longer sufficiently replicated
 * - free => cached: cannot happen
 * - free => dirty: cannot happen
 * - free => metadata: cannot happen
 */
67 #include "alloc_background.h"
70 #include "btree_update.h"
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
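/*
 * Illustrative sketch only (not part of the original file): how the bucket
 * states described in the header comment can be read off a struct
 * bucket_mark. bucket_state_name() is a hypothetical helper, shown to make
 * the state table concrete; the real classification helpers in this file are
 * bucket_type() and is_available_bucket().
 */
#if 0
static const char *bucket_state_name(struct bucket_mark m)
{
	if (m.owned_by_allocator)
		return "allocator";	/* on a freelist, or an open bucket */
	if (m.data_type == BCH_DATA_sb ||
	    m.data_type == BCH_DATA_journal ||
	    m.data_type == BCH_DATA_btree)
		return "metadata";
	if (m.dirty_sectors)
		return "dirty";
	if (m.cached_sectors)
		return "cached";
	return "free";			/* mark == 0: not read, safe to reuse */
}
#endif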
static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
					      enum bch_data_type data_type,
					      s64 sectors)
{
	switch (data_type) {
	case BCH_DATA_btree:
		fs_usage->btree += sectors;
		break;
	case BCH_DATA_user:
	case BCH_DATA_parity:
		fs_usage->data += sectors;
		break;
	case BCH_DATA_cached:
		fs_usage->cached += sectors;
		break;
	default:
		break;
	}
}
/*
 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
 * wraparound:
 */
104 void bch2_bucket_seq_cleanup(struct bch_fs *c)
106 u64 journal_seq = atomic64_read(&c->journal.seq);
107 u16 last_seq_ondisk = c->journal.last_seq_ondisk;
109 struct bucket_array *buckets;
111 struct bucket_mark m;
114 if (journal_seq - c->last_bucket_seq_cleanup <
115 (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
118 c->last_bucket_seq_cleanup = journal_seq;
120 for_each_member_device(ca, c, i) {
121 down_read(&ca->bucket_lock);
122 buckets = bucket_array(ca);
124 for_each_bucket(g, buckets) {
125 bucket_cmpxchg(g, m, ({
126 if (!m.journal_seq_valid ||
127 bucket_needs_journal_commit(m, last_seq_ondisk))
130 m.journal_seq_valid = 0;
133 up_read(&ca->bucket_lock);
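/*
 * Rebuild the base usage totals at startup: fold any accumulated per-cpu
 * deltas into usage_base, then recompute the derived reserved, per-replicas
 * and hidden (superblock/journal) counters from the replicas table and the
 * per-device usage.
 */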
137 void bch2_fs_usage_initialize(struct bch_fs *c)
139 struct bch_fs_usage *usage;
143 percpu_down_write(&c->mark_lock);
144 usage = c->usage_base;
146 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
147 bch2_fs_usage_acc_to_base(c, i);
149 for (i = 0; i < BCH_REPLICAS_MAX; i++)
150 usage->reserved += usage->persistent_reserved[i];
152 for (i = 0; i < c->replicas.nr; i++) {
153 struct bch_replicas_entry *e =
154 cpu_replicas_entry(&c->replicas, i);
156 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
159 for_each_member_device(ca, c, i) {
160 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
162 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
163 dev.d[BCH_DATA_journal].buckets) *
167 percpu_up_write(&c->mark_lock);
170 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
172 if (fs_usage == c->usage_scratch)
173 mutex_unlock(&c->usage_scratch_lock);
178 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
180 struct bch_fs_usage *ret;
181 unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
183 ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
187 if (mutex_trylock(&c->usage_scratch_lock))
190 ret = kzalloc(bytes, GFP_NOFS);
194 mutex_lock(&c->usage_scratch_lock);
196 ret = c->usage_scratch;
197 memset(ret, 0, bytes);
201 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
202 unsigned journal_seq,
205 return this_cpu_ptr(gc
207 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
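/*
 * Read a consistent snapshot of a device's usage: copy usage_base, then fold
 * in the per-cpu deltas for each journal buffer, retrying if the usage_lock
 * seqcount indicates a concurrent writer.
 */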
210 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
212 struct bch_fs *c = ca->fs;
213 struct bch_dev_usage ret;
214 unsigned seq, i, u64s = dev_usage_u64s();
217 seq = read_seqcount_begin(&c->usage_lock);
218 memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
219 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
220 acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
221 } while (read_seqcount_retry(&c->usage_lock, seq));
226 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
227 unsigned journal_seq,
230 return this_cpu_ptr(gc
232 : c->usage[journal_seq & JOURNAL_BUF_MASK]);
235 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
237 ssize_t offset = v - (u64 *) c->usage_base;
241 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
242 percpu_rwsem_assert_held(&c->mark_lock);
245 seq = read_seqcount_begin(&c->usage_lock);
248 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
249 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
250 } while (read_seqcount_retry(&c->usage_lock, seq));
255 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
257 struct bch_fs_usage *ret;
258 unsigned seq, i, v, u64s = fs_usage_u64s(c);
260 ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
264 percpu_down_read(&c->mark_lock);
266 v = fs_usage_u64s(c);
267 if (unlikely(u64s != v)) {
269 percpu_up_read(&c->mark_lock);
275 seq = read_seqcount_begin(&c->usage_lock);
276 memcpy(ret, c->usage_base, u64s * sizeof(u64));
277 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
278 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[i], u64s);
279 } while (read_seqcount_retry(&c->usage_lock, seq));
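/*
 * Fold the per-cpu usage deltas accumulated against journal buffer @idx into
 * the base filesystem and per-device counters, then zero them.
 */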
284 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
287 unsigned i, u64s = fs_usage_u64s(c);
289 BUG_ON(idx >= ARRAY_SIZE(c->usage));
292 write_seqcount_begin(&c->usage_lock);
294 acc_u64s_percpu((u64 *) c->usage_base,
295 (u64 __percpu *) c->usage[idx], u64s);
296 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
299 for_each_member_device_rcu(ca, c, i, NULL) {
300 u64s = dev_usage_u64s();
302 acc_u64s_percpu((u64 *) ca->usage_base,
303 (u64 __percpu *) ca->usage[idx], u64s);
304 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
308 write_seqcount_end(&c->usage_lock);
312 void bch2_fs_usage_to_text(struct printbuf *out,
314 struct bch_fs_usage *fs_usage)
318 pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
320 pr_buf(out, "hidden:\t\t\t\t%llu\n",
322 pr_buf(out, "data:\t\t\t\t%llu\n",
324 pr_buf(out, "cached:\t\t\t\t%llu\n",
326 pr_buf(out, "reserved:\t\t\t%llu\n",
328 pr_buf(out, "nr_inodes:\t\t\t%llu\n",
329 fs_usage->nr_inodes);
330 pr_buf(out, "online reserved:\t\t%llu\n",
331 fs_usage->online_reserved);
334 i < ARRAY_SIZE(fs_usage->persistent_reserved);
336 pr_buf(out, "%u replicas:\n", i + 1);
337 pr_buf(out, "\treserved:\t\t%llu\n",
338 fs_usage->persistent_reserved[i]);
341 for (i = 0; i < c->replicas.nr; i++) {
342 struct bch_replicas_entry *e =
343 cpu_replicas_entry(&c->replicas, i);
346 bch2_replicas_entry_to_text(out, e);
347 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
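/*
 * Keep a small safety margin: reserve_factor() inflates the amount of space
 * we account as used by 1/2^RESERVE_FACTOR (~1.6%), and avail_factor() is
 * the approximate inverse applied when reporting free space. With
 * RESERVE_FACTOR == 6, reserve_factor(64) == 65 and avail_factor(65) == 64.
 */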
351 #define RESERVE_FACTOR 6
353 static u64 reserve_factor(u64 r)
355 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
358 static u64 avail_factor(u64 r)
360 return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
363 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
365 return min(fs_usage->hidden +
368 reserve_factor(fs_usage->reserved +
369 fs_usage->online_reserved),
373 static struct bch_fs_usage_short
374 __bch2_fs_usage_read_short(struct bch_fs *c)
376 struct bch_fs_usage_short ret;
379 ret.capacity = c->capacity -
380 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
382 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
383 bch2_fs_usage_read_one(c, &c->usage_base->btree);
384 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
385 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
387 ret.used = min(ret.capacity, data + reserve_factor(reserved));
388 ret.free = ret.capacity - ret.used;
390 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
395 struct bch_fs_usage_short
396 bch2_fs_usage_read_short(struct bch_fs *c)
398 struct bch_fs_usage_short ret;
400 percpu_down_read(&c->mark_lock);
401 ret = __bch2_fs_usage_read_short(c);
402 percpu_up_read(&c->mark_lock);
407 static inline int is_unavailable_bucket(struct bucket_mark m)
409 return !is_available_bucket(m);
412 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
413 struct bucket_mark m)
415 return bucket_sectors_used(m)
416 ? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
420 static inline int is_stripe_data_bucket(struct bucket_mark m)
422 return m.stripe && m.data_type != BCH_DATA_parity;
425 static inline enum bch_data_type bucket_type(struct bucket_mark m)
427 return m.cached_sectors && !m.dirty_sectors
432 static bool bucket_became_unavailable(struct bucket_mark old,
433 struct bucket_mark new)
435 return is_available_bucket(old) &&
436 !is_available_bucket(new);
439 int bch2_fs_usage_apply(struct bch_fs *c,
440 struct bch_fs_usage *fs_usage,
441 struct disk_reservation *disk_res,
442 unsigned journal_seq)
444 s64 added = fs_usage->data + fs_usage->reserved;
445 s64 should_not_have_added;
448 percpu_rwsem_assert_held(&c->mark_lock);
	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
454 should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
455 if (WARN_ONCE(should_not_have_added > 0,
456 "disk usage increased by %lli more than reservation of %llu",
457 added, disk_res ? disk_res->sectors : 0)) {
458 atomic64_sub(should_not_have_added, &c->sectors_available);
459 added -= should_not_have_added;
464 disk_res->sectors -= added;
465 fs_usage->online_reserved -= added;
469 acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
470 (u64 *) fs_usage, fs_usage_u64s(c));
476 static inline void account_bucket(struct bch_fs_usage *fs_usage,
477 struct bch_dev_usage *dev_usage,
478 enum bch_data_type type,
481 if (type == BCH_DATA_sb || type == BCH_DATA_journal)
482 fs_usage->hidden += size;
484 dev_usage->d[type].buckets += nr;
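/*
 * Apply the difference between an old and a new bucket mark to the owning
 * device's usage counters (buckets, sectors, fragmentation), and wake the
 * allocator if the bucket just became available.
 */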
487 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
488 struct bch_fs_usage *fs_usage,
489 struct bucket_mark old, struct bucket_mark new,
490 u64 journal_seq, bool gc)
492 struct bch_dev_usage *u;
494 percpu_rwsem_assert_held(&c->mark_lock);
497 u = dev_usage_ptr(ca, journal_seq, gc);
499 if (bucket_type(old))
500 account_bucket(fs_usage, u, bucket_type(old),
501 -1, -ca->mi.bucket_size);
503 if (bucket_type(new))
504 account_bucket(fs_usage, u, bucket_type(new),
505 1, ca->mi.bucket_size);
508 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
509 u->buckets_ec += (int) new.stripe - (int) old.stripe;
510 u->buckets_unavailable +=
511 is_unavailable_bucket(new) - is_unavailable_bucket(old);
513 u->d[old.data_type].sectors -= old.dirty_sectors;
514 u->d[new.data_type].sectors += new.dirty_sectors;
515 u->d[BCH_DATA_cached].sectors +=
516 (int) new.cached_sectors - (int) old.cached_sectors;
518 u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
519 u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
523 if (!is_available_bucket(old) && is_available_bucket(new))
524 bch2_wake_allocator(ca);
527 static inline int update_replicas(struct bch_fs *c,
528 struct bch_fs_usage *fs_usage,
529 struct bch_replicas_entry *r,
532 int idx = bch2_replicas_entry_idx(c, r);
540 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
541 fs_usage->replicas[idx] += sectors;
545 static inline void update_cached_sectors(struct bch_fs *c,
546 struct bch_fs_usage *fs_usage,
547 unsigned dev, s64 sectors)
549 struct bch_replicas_padded r;
551 bch2_replicas_entry_cached(&r.e, dev);
553 update_replicas(c, fs_usage, &r.e, sectors);
556 static struct replicas_delta_list *
557 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
559 struct replicas_delta_list *d = trans->fs_usage_deltas;
560 unsigned new_size = d ? (d->size + more) * 2 : 128;
562 if (!d || d->used + more > d->size) {
563 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
567 trans->fs_usage_deltas = d;
572 static inline void update_replicas_list(struct btree_trans *trans,
573 struct bch_replicas_entry *r,
576 struct replicas_delta_list *d;
577 struct replicas_delta *n;
583 b = replicas_entry_bytes(r) + 8;
584 d = replicas_deltas_realloc(trans, b);
586 n = (void *) d->d + d->used;
588 memcpy(&n->r, r, replicas_entry_bytes(r));
592 static inline void update_cached_sectors_list(struct btree_trans *trans,
593 unsigned dev, s64 sectors)
595 struct bch_replicas_padded r;
597 bch2_replicas_entry_cached(&r.e, dev);
599 update_replicas_list(trans, &r.e, sectors);
602 static inline struct replicas_delta *
603 replicas_delta_next(struct replicas_delta *d)
605 return (void *) d + replicas_entry_bytes(&d->r) + 8;
608 int bch2_replicas_delta_list_apply(struct bch_fs *c,
609 struct bch_fs_usage *fs_usage,
610 struct replicas_delta_list *r)
612 struct replicas_delta *d = r->d;
613 struct replicas_delta *top = (void *) r->d + r->used;
616 for (d = r->d; d != top; d = replicas_delta_next(d))
617 if (update_replicas(c, fs_usage, &d->r, d->delta)) {
625 fs_usage->nr_inodes += r->nr_inodes;
627 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
628 fs_usage->reserved += r->persistent_reserved[i];
629 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
634 for (d = r->d; d != top; d = replicas_delta_next(d))
635 update_replicas(c, fs_usage, &d->r, -d->delta);
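/*
 * Run a mark function against the live bucket state, the GC copy, or both,
 * depending on BTREE_TRIGGER_GC and on whether GC has already visited this
 * position.
 */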
639 #define do_mark_fn(fn, c, pos, flags, ...) \
643 percpu_rwsem_assert_held(&c->mark_lock); \
645 for (gc = 0; gc < 2 && !ret; gc++) \
646 if (!gc == !(flags & BTREE_TRIGGER_GC) || \
647 (gc && gc_visited(c, pos))) \
648 ret = fn(c, __VA_ARGS__, gc); \
652 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
653 size_t b, bool owned_by_allocator,
656 struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
657 struct bucket *g = __bucket(ca, b, gc);
658 struct bucket_mark old, new;
660 old = bucket_cmpxchg(g, new, ({
661 new.owned_by_allocator = owned_by_allocator;
665 * XXX: this is wrong, this means we'll be doing updates to the percpu
666 * buckets_alloc counter that don't have an open journal buffer and
667 * we'll race with the machinery that accumulates that to ca->usage_base
669 bch2_dev_usage_update(c, ca, fs_usage, old, new, 0, gc);
672 !owned_by_allocator && !old.owned_by_allocator);
677 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
678 size_t b, bool owned_by_allocator,
679 struct gc_pos pos, unsigned flags)
683 do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
684 ca, b, owned_by_allocator);
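/*
 * Trigger for alloc keys: copy the unpacked alloc key fields into the
 * in-memory bucket mark, and when called from the invalidate path, drop the
 * cached sectors the bucket used to contain.
 */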
689 static int bch2_mark_alloc(struct bch_fs *c,
690 struct bkey_s_c old, struct bkey_s_c new,
691 struct bch_fs_usage *fs_usage,
692 u64 journal_seq, unsigned flags)
694 bool gc = flags & BTREE_TRIGGER_GC;
695 struct bkey_alloc_unpacked u;
698 struct bucket_mark old_m, m;
700 /* We don't do anything for deletions - do we?: */
701 if (new.k->type != KEY_TYPE_alloc &&
702 new.k->type != KEY_TYPE_alloc_v2)
706 * alloc btree is read in by bch2_alloc_read, not gc:
708 if ((flags & BTREE_TRIGGER_GC) &&
709 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
712 ca = bch_dev_bkey_exists(c, new.k->p.inode);
714 if (new.k->p.offset >= ca->mi.nbuckets)
717 g = __bucket(ca, new.k->p.offset, gc);
718 u = bch2_alloc_unpack(new);
720 old_m = bucket_cmpxchg(g, m, ({
722 m.data_type = u.data_type;
723 m.dirty_sectors = u.dirty_sectors;
724 m.cached_sectors = u.cached_sectors;
725 m.stripe = u.stripe != 0;
728 m.journal_seq_valid = 1;
729 m.journal_seq = journal_seq;
733 bch2_dev_usage_update(c, ca, fs_usage, old_m, m, journal_seq, gc);
735 g->io_time[READ] = u.read_time;
736 g->io_time[WRITE] = u.write_time;
737 g->oldest_gen = u.oldest_gen;
739 g->stripe = u.stripe;
740 g->stripe_redundancy = u.stripe_redundancy;
	/*
	 * need to know if we're getting called from the invalidate path or
	 * not:
	 */
747 if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
748 old_m.cached_sectors) {
749 update_cached_sectors(c, fs_usage, ca->dev_idx,
750 -old_m.cached_sectors);
751 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
752 old_m.cached_sectors);
#define checked_add(a, b)					\
({								\
	unsigned _res = (unsigned) (a) + (b);			\
	bool overflow = _res > U16_MAX;				\
	if (overflow)						\
		_res = U16_MAX;					\
	(a) = _res;						\
	overflow;						\
})
768 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
769 size_t b, enum bch_data_type data_type,
770 unsigned sectors, bool gc)
772 struct bucket *g = __bucket(ca, b, gc);
773 struct bucket_mark old, new;
776 BUG_ON(data_type != BCH_DATA_sb &&
777 data_type != BCH_DATA_journal);
779 old = bucket_cmpxchg(g, new, ({
780 new.data_type = data_type;
781 overflow = checked_add(new.dirty_sectors, sectors);
784 bch2_fs_inconsistent_on(old.data_type &&
785 old.data_type != data_type, c,
786 "different types of data in same bucket: %s, %s",
787 bch2_data_types[old.data_type],
788 bch2_data_types[data_type]);
790 bch2_fs_inconsistent_on(overflow, c,
791 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
792 ca->dev_idx, b, new.gen,
793 bch2_data_types[old.data_type ?: data_type],
794 old.dirty_sectors, sectors);
797 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
803 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
804 size_t b, enum bch_data_type type,
805 unsigned sectors, struct gc_pos pos,
808 BUG_ON(type != BCH_DATA_sb &&
809 type != BCH_DATA_journal);
814 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
815 ca, b, type, sectors);
817 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
823 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
825 return DIV_ROUND_UP(sectors * n, d);
828 static s64 __ptr_disk_sectors_delta(unsigned old_size,
829 unsigned offset, s64 delta,
831 unsigned n, unsigned d)
835 if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
836 BUG_ON(offset + -delta > old_size);
838 return -disk_sectors_scaled(n, d, old_size) +
839 disk_sectors_scaled(n, d, offset) +
840 disk_sectors_scaled(n, d, old_size - offset + delta);
841 } else if (flags & BTREE_TRIGGER_OVERWRITE) {
842 BUG_ON(offset + -delta > old_size);
844 return -disk_sectors_scaled(n, d, old_size) +
845 disk_sectors_scaled(n, d, old_size + delta);
847 return disk_sectors_scaled(n, d, delta);
851 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
852 unsigned offset, s64 delta,
855 return __ptr_disk_sectors_delta(p.crc.live_size,
856 offset, delta, flags,
857 p.crc.compressed_size,
858 p.crc.uncompressed_size);
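/*
 * Sanity checks run before marking a pointer into a bucket: pointer gen vs.
 * bucket gen, staleness, conflicting data types, and sector count overflow.
 * Flags an fsck error and returns nonzero if the bucket is inconsistent with
 * the key.
 */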
861 static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
862 const struct bch_extent_ptr *ptr,
863 s64 sectors, enum bch_data_type ptr_data_type,
864 u8 bucket_gen, u8 bucket_data_type,
865 u16 dirty_sectors, u16 cached_sectors)
867 size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
868 u16 bucket_sectors = !ptr->cached
873 if (gen_after(ptr->gen, bucket_gen)) {
874 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
875 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
877 ptr->dev, bucket_nr, bucket_gen,
878 bch2_data_types[bucket_data_type ?: ptr_data_type],
880 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
884 if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
885 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
886 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
888 ptr->dev, bucket_nr, bucket_gen,
889 bch2_data_types[bucket_data_type ?: ptr_data_type],
891 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
895 if (bucket_gen != ptr->gen && !ptr->cached) {
896 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
897 "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
899 ptr->dev, bucket_nr, bucket_gen,
900 bch2_data_types[bucket_data_type ?: ptr_data_type],
902 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
906 if (bucket_gen != ptr->gen)
909 if (bucket_data_type && ptr_data_type &&
910 bucket_data_type != ptr_data_type) {
911 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
912 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
914 ptr->dev, bucket_nr, bucket_gen,
915 bch2_data_types[bucket_data_type],
916 bch2_data_types[ptr_data_type],
917 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
921 if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
922 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
923 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
925 ptr->dev, bucket_nr, bucket_gen,
926 bch2_data_types[bucket_data_type ?: ptr_data_type],
927 bucket_sectors, sectors,
928 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
935 static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
937 struct bch_fs_usage *fs_usage,
938 u64 journal_seq, unsigned flags)
940 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
941 unsigned nr_data = s->nr_blocks - s->nr_redundant;
942 bool parity = ptr_idx >= nr_data;
943 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
944 bool gc = flags & BTREE_TRIGGER_GC;
945 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
946 struct bucket *g = PTR_BUCKET(ca, ptr, gc);
947 struct bucket_mark new, old;
951 if (g->stripe && g->stripe != k.k->p.offset) {
952 bch2_fs_inconsistent(c,
953 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
954 ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
955 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
959 old = bucket_cmpxchg(g, new, ({
960 ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
961 new.dirty_sectors, new.cached_sectors);
966 new.data_type = BCH_DATA_parity;
967 new.dirty_sectors = le16_to_cpu(s->sectors);
971 new.journal_seq_valid = 1;
972 new.journal_seq = journal_seq;
976 g->stripe = k.k->p.offset;
977 g->stripe_redundancy = s->nr_redundant;
979 bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
983 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
984 const struct bch_extent_ptr *ptr,
985 s64 sectors, enum bch_data_type ptr_data_type,
986 u8 bucket_gen, u8 *bucket_data_type,
987 u16 *dirty_sectors, u16 *cached_sectors)
989 u16 *dst_sectors = !ptr->cached
992 int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
993 bucket_gen, *bucket_data_type,
994 *dirty_sectors, *cached_sectors);
999 *dst_sectors += sectors;
1000 *bucket_data_type = *dirty_sectors || *cached_sectors
1001 ? ptr_data_type : 0;
1005 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1006 struct extent_ptr_decoded p,
1007 s64 sectors, enum bch_data_type data_type,
1008 struct bch_fs_usage *fs_usage,
1009 u64 journal_seq, unsigned flags)
1011 bool gc = flags & BTREE_TRIGGER_GC;
1012 struct bucket_mark old, new;
1013 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1014 struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
1015 u8 bucket_data_type;
1019 v = atomic64_read(&g->_mark.v);
1021 new.v.counter = old.v.counter = v;
1022 bucket_data_type = new.data_type;
1024 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
1027 &new.cached_sectors);
1031 new.data_type = bucket_data_type;
1034 new.journal_seq_valid = 1;
1035 new.journal_seq = journal_seq;
1038 if (flags & BTREE_TRIGGER_NOATOMIC) {
1042 } while ((v = atomic64_cmpxchg(&g->_mark.v,
1044 new.v.counter)) != old.v.counter);
1046 bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
1048 BUG_ON(!gc && bucket_became_unavailable(old, new));
1053 static int bch2_mark_stripe_ptr(struct bch_fs *c,
1054 struct bch_extent_stripe_ptr p,
1055 enum bch_data_type data_type,
1056 struct bch_fs_usage *fs_usage,
1057 s64 sectors, unsigned flags)
1059 bool gc = flags & BTREE_TRIGGER_GC;
1060 struct bch_replicas_padded r;
1062 unsigned i, blocks_nonempty = 0;
1064 m = genradix_ptr(&c->stripes[gc], p.idx);
1066 spin_lock(&c->ec_stripes_heap_lock);
1068 if (!m || !m->alive) {
1069 spin_unlock(&c->ec_stripes_heap_lock);
1070 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1075 m->block_sectors[p.block] += sectors;
1079 for (i = 0; i < m->nr_blocks; i++)
1080 blocks_nonempty += m->block_sectors[i] != 0;
1082 if (m->blocks_nonempty != blocks_nonempty) {
1083 m->blocks_nonempty = blocks_nonempty;
1085 bch2_stripes_heap_update(c, m, p.idx);
1088 spin_unlock(&c->ec_stripes_heap_lock);
1090 r.e.data_type = data_type;
1091 update_replicas(c, fs_usage, &r.e, sectors);
1096 static int bch2_mark_extent(struct bch_fs *c,
1097 struct bkey_s_c old, struct bkey_s_c new,
1098 unsigned offset, s64 sectors,
1099 enum bch_data_type data_type,
1100 struct bch_fs_usage *fs_usage,
1101 unsigned journal_seq, unsigned flags)
1103 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1104 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1105 const union bch_extent_entry *entry;
1106 struct extent_ptr_decoded p;
1107 struct bch_replicas_padded r;
1108 s64 dirty_sectors = 0;
1112 r.e.data_type = data_type;
1114 r.e.nr_required = 1;
1118 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1119 s64 disk_sectors = data_type == BCH_DATA_btree
1121 : ptr_disk_sectors_delta(p, offset, sectors, flags);
1123 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
1124 fs_usage, journal_seq, flags);
1132 update_cached_sectors(c, fs_usage, p.ptr.dev,
1134 } else if (!p.has_ec) {
1135 dirty_sectors += disk_sectors;
1136 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1138 ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1139 fs_usage, disk_sectors, flags);
1144 * There may be other dirty pointers in this extent, but
1145 * if so they're not required for mounting if we have an
1146 * erasure coded pointer in this extent:
1148 r.e.nr_required = 0;
1153 update_replicas(c, fs_usage, &r.e, dirty_sectors);
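/*
 * Trigger for stripe keys: keep the in-memory stripe (c->stripes) and the
 * stripes heap in sync with the key; in the GC path, also mark the stripe's
 * buckets and account the parity sectors.
 */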
1158 static int bch2_mark_stripe(struct bch_fs *c,
1159 struct bkey_s_c old, struct bkey_s_c new,
1160 struct bch_fs_usage *fs_usage,
1161 u64 journal_seq, unsigned flags)
1163 bool gc = flags & BTREE_TRIGGER_GC;
1164 size_t idx = new.k->p.offset;
1165 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1166 ? bkey_s_c_to_stripe(old).v : NULL;
1167 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1168 ? bkey_s_c_to_stripe(new).v : NULL;
1169 struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1173 BUG_ON(gc && old_s);
1175 if (!m || (old_s && !m->alive)) {
1176 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1182 spin_lock(&c->ec_stripes_heap_lock);
1183 bch2_stripes_heap_del(c, m, idx);
1184 spin_unlock(&c->ec_stripes_heap_lock);
1186 memset(m, 0, sizeof(*m));
1189 m->sectors = le16_to_cpu(new_s->sectors);
1190 m->algorithm = new_s->algorithm;
1191 m->nr_blocks = new_s->nr_blocks;
1192 m->nr_redundant = new_s->nr_redundant;
1193 m->blocks_nonempty = 0;
1195 for (i = 0; i < new_s->nr_blocks; i++) {
1196 m->block_sectors[i] =
1197 stripe_blockcount_get(new_s, i);
1198 m->blocks_nonempty += !!m->block_sectors[i];
1201 bch2_bkey_to_replicas(&m->r.e, new);
1204 spin_lock(&c->ec_stripes_heap_lock);
1205 bch2_stripes_heap_update(c, m, idx);
1206 spin_unlock(&c->ec_stripes_heap_lock);
		/*
		 * gc recalculates this field from stripe ptr
		 * references:
		 */
1215 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1216 m->blocks_nonempty = 0;
1218 for (i = 0; i < new_s->nr_blocks; i++) {
1219 ret = mark_stripe_bucket(c, new, i, fs_usage,
1220 journal_seq, flags);
1225 update_replicas(c, fs_usage, &m->r.e,
1226 ((s64) m->sectors * m->nr_redundant));
1232 static int bch2_mark_key_locked(struct bch_fs *c,
1233 struct bkey_s_c old,
1234 struct bkey_s_c new,
1235 unsigned offset, s64 sectors,
1236 struct bch_fs_usage *fs_usage,
1237 u64 journal_seq, unsigned flags)
1239 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1242 BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1246 if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1247 fs_usage = fs_usage_ptr(c, journal_seq,
1248 flags & BTREE_TRIGGER_GC);
1250 switch (k.k->type) {
1251 case KEY_TYPE_alloc:
1252 case KEY_TYPE_alloc_v2:
1253 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1255 case KEY_TYPE_btree_ptr:
1256 case KEY_TYPE_btree_ptr_v2:
1257 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1258 ? c->opts.btree_node_size
1259 : -c->opts.btree_node_size;
1261 ret = bch2_mark_extent(c, old, new, offset, sectors,
1262 BCH_DATA_btree, fs_usage, journal_seq, flags);
1264 case KEY_TYPE_extent:
1265 case KEY_TYPE_reflink_v:
1266 ret = bch2_mark_extent(c, old, new, offset, sectors,
1267 BCH_DATA_user, fs_usage, journal_seq, flags);
1269 case KEY_TYPE_stripe:
1270 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1272 case KEY_TYPE_inode:
1273 fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
1274 fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
1276 case KEY_TYPE_reservation: {
1277 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1279 sectors *= replicas;
1280 replicas = clamp_t(unsigned, replicas, 1,
1281 ARRAY_SIZE(fs_usage->persistent_reserved));
1283 fs_usage->reserved += sectors;
1284 fs_usage->persistent_reserved[replicas - 1] += sectors;
1294 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1295 unsigned offset, s64 sectors,
1296 struct bch_fs_usage *fs_usage,
1297 u64 journal_seq, unsigned flags)
1299 struct bkey deleted;
1300 struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1303 bkey_init(&deleted);
1305 percpu_down_read(&c->mark_lock);
1306 ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1307 fs_usage, journal_seq,
1308 BTREE_TRIGGER_INSERT|flags);
1309 percpu_up_read(&c->mark_lock);
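/*
 * Run mark triggers for a transaction update: mark the new key against the
 * key it replaces, and for extent btrees, walk the existing keys the new key
 * overlaps and mark the overwritten portions.
 */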
1314 int bch2_mark_update(struct btree_trans *trans,
1315 struct btree_iter *iter,
1317 struct bch_fs_usage *fs_usage,
1320 struct bch_fs *c = trans->c;
1321 struct btree *b = iter_l(iter)->b;
1322 struct btree_node_iter node_iter = iter_l(iter)->iter;
1323 struct bkey_packed *_old;
1324 struct bkey_s_c old;
1325 struct bkey unpacked;
1328 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1331 if (!btree_node_type_needs_gc(iter->btree_id))
1334 bkey_init(&unpacked);
1335 old = (struct bkey_s_c) { &unpacked, NULL };
1337 if (!btree_node_type_is_extents(iter->btree_id)) {
1338 /* iterators should be uptodate, shouldn't get errors here: */
1339 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1340 old = bch2_btree_iter_peek_slot(iter);
1341 BUG_ON(bkey_err(old));
1343 struct bkey_cached *ck = (void *) iter->l[0].b;
1346 old = bkey_i_to_s_c(ck->k);
1349 if (old.k->type == new->k.type) {
1350 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1351 fs_usage, trans->journal_res.seq,
1352 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1355 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1356 fs_usage, trans->journal_res.seq,
1357 BTREE_TRIGGER_INSERT|flags);
1358 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1359 fs_usage, trans->journal_res.seq,
1360 BTREE_TRIGGER_OVERWRITE|flags);
1363 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1364 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1366 fs_usage, trans->journal_res.seq,
1367 BTREE_TRIGGER_INSERT|flags);
1369 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
1370 unsigned offset = 0;
1373 old = bkey_disassemble(b, _old, &unpacked);
1374 sectors = -((s64) old.k->size);
1376 flags |= BTREE_TRIGGER_OVERWRITE;
1378 if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1381 switch (bch2_extent_overlap(&new->k, old.k)) {
1382 case BCH_EXTENT_OVERLAP_ALL:
1384 sectors = -((s64) old.k->size);
1386 case BCH_EXTENT_OVERLAP_BACK:
1387 offset = bkey_start_offset(&new->k) -
1388 bkey_start_offset(old.k);
1389 sectors = bkey_start_offset(&new->k) -
1392 case BCH_EXTENT_OVERLAP_FRONT:
1394 sectors = bkey_start_offset(old.k) -
1397 case BCH_EXTENT_OVERLAP_MIDDLE:
1398 offset = bkey_start_offset(&new->k) -
1399 bkey_start_offset(old.k);
1400 sectors = -((s64) new->k.size);
1401 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1405 BUG_ON(sectors >= 0);
1407 ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1408 offset, sectors, fs_usage,
1409 trans->journal_res.seq, flags) ?: 1;
1413 bch2_btree_node_iter_advance(&node_iter, b);
1420 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1421 struct bch_fs_usage *fs_usage)
1423 struct bch_fs *c = trans->c;
1424 struct btree_insert_entry *i;
1425 static int warned_disk_usage = 0;
1426 u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1429 if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1430 trans->journal_res.seq) ||
1431 warned_disk_usage ||
1432 xchg(&warned_disk_usage, 1))
1435 bch_err(c, "disk usage increased more than %llu sectors reserved",
1438 trans_for_each_update(trans, i) {
1439 pr_err("while inserting");
1440 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1442 pr_err("overlapping with");
1444 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1445 struct btree *b = iter_l(i->iter)->b;
1446 struct btree_node_iter node_iter = iter_l(i->iter)->iter;
1447 struct bkey_packed *_k;
1449 while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1450 struct bkey unpacked;
1453 pr_info("_k %px format %u", _k, _k->format);
1454 k = bkey_disassemble(b, _k, &unpacked);
1456 if (btree_node_is_extents(b)
1457 ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1458 : bkey_cmp(i->k->k.p, k.k->p))
1461 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1464 bch2_btree_node_iter_advance(&node_iter, b);
1467 struct bkey_cached *ck = (void *) i->iter->l[0].b;
1470 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
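/*
 * Look up a key for transactional marking: prefer a matching update already
 * queued in this transaction (so we see our own uncommitted changes),
 * otherwise get an iterator and peek the key from the btree.
 */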
1479 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1480 enum btree_id btree_id, struct bpos pos,
1483 struct btree_insert_entry *i;
1485 trans_for_each_update(trans, i)
1486 if (i->iter->btree_id == btree_id &&
1487 (btree_node_type_is_extents(btree_id)
1488 ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1489 bkey_cmp(pos, i->k->k.p) < 0
1490 : !bkey_cmp(pos, i->iter->pos))) {
1491 *k = bkey_i_to_s_c(i->k);
1498 static int trans_get_key(struct btree_trans *trans,
1499 enum btree_id btree_id, struct bpos pos,
1500 struct btree_iter **iter,
1503 unsigned flags = btree_id != BTREE_ID_ALLOC
1505 : BTREE_ITER_CACHED;
1508 *iter = trans_get_update(trans, btree_id, pos, k);
1512 *iter = bch2_trans_get_iter(trans, btree_id, pos,
1513 flags|BTREE_ITER_INTENT);
1514 *k = __bch2_btree_iter_peek(*iter, flags);
1517 bch2_trans_iter_put(trans, *iter);
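/*
 * Begin a transactional update of the alloc key for @ptr's bucket: allocate
 * a bkey_alloc_buf from the transaction and unpack the current alloc state,
 * either from a pending update/the btree key or from the in-memory bucket
 * mark.
 */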
1521 static struct bkey_alloc_buf *
1522 bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
1523 const struct bch_extent_ptr *ptr,
1524 struct bkey_alloc_unpacked *u)
1526 struct bch_fs *c = trans->c;
1527 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1528 struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1530 struct btree_iter *iter;
1532 struct bkey_alloc_buf *a;
1535 a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
1539 iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k);
1541 *u = bch2_alloc_unpack(k);
1543 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
1545 BTREE_ITER_CACHED_NOFILL|
1547 ret = bch2_btree_iter_traverse(iter);
1549 bch2_trans_iter_put(trans, iter);
1550 return ERR_PTR(ret);
1553 percpu_down_read(&c->mark_lock);
1554 g = bucket(ca, pos.offset);
1555 *u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
1556 percpu_up_read(&c->mark_lock);
1563 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1564 struct bkey_s_c k, struct extent_ptr_decoded p,
1565 s64 sectors, enum bch_data_type data_type)
1567 struct bch_fs *c = trans->c;
1568 struct btree_iter *iter;
1569 struct bkey_alloc_unpacked u;
1570 struct bkey_alloc_buf *a;
1573 a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1577 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
1578 &u.dirty_sectors, &u.cached_sectors);
1582 bch2_alloc_pack(c, a, u);
1583 bch2_trans_update(trans, iter, &a->k, 0);
1585 bch2_trans_iter_put(trans, iter);
1589 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1590 struct extent_ptr_decoded p,
1591 s64 sectors, enum bch_data_type data_type)
1593 struct bch_fs *c = trans->c;
1594 struct btree_iter *iter;
1596 struct bkey_i_stripe *s;
1597 struct bch_replicas_padded r;
1600 ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.ec.idx), &iter, &k);
1604 if (k.k->type != KEY_TYPE_stripe) {
1605 bch2_fs_inconsistent(c,
1606 "pointer to nonexistent stripe %llu",
1612 if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1613 bch2_fs_inconsistent(c,
1614 "stripe pointer doesn't match stripe %llu",
1620 s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1621 ret = PTR_ERR_OR_ZERO(s);
1625 bkey_reassemble(&s->k_i, k);
1626 stripe_blockcount_set(&s->v, p.ec.block,
1627 stripe_blockcount_get(&s->v, p.ec.block) +
1629 bch2_trans_update(trans, iter, &s->k_i, 0);
1631 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1632 r.e.data_type = data_type;
1633 update_replicas_list(trans, &r.e, sectors);
1635 bch2_trans_iter_put(trans, iter);
1639 static int bch2_trans_mark_extent(struct btree_trans *trans,
1640 struct bkey_s_c k, unsigned offset,
1641 s64 sectors, unsigned flags,
1642 enum bch_data_type data_type)
1644 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1645 const union bch_extent_entry *entry;
1646 struct extent_ptr_decoded p;
1647 struct bch_replicas_padded r;
1648 s64 dirty_sectors = 0;
1652 r.e.data_type = data_type;
1654 r.e.nr_required = 1;
1658 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1659 s64 disk_sectors = data_type == BCH_DATA_btree
1661 : ptr_disk_sectors_delta(p, offset, sectors, flags);
1663 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1672 update_cached_sectors_list(trans, p.ptr.dev,
1674 } else if (!p.has_ec) {
1675 dirty_sectors += disk_sectors;
1676 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1678 ret = bch2_trans_mark_stripe_ptr(trans, p,
1679 disk_sectors, data_type);
1683 r.e.nr_required = 0;
1688 update_replicas_list(trans, &r.e, dirty_sectors);
1693 static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
1694 struct bkey_s_c_stripe s,
1695 unsigned idx, bool deleting)
1697 struct bch_fs *c = trans->c;
1698 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1699 struct bkey_alloc_buf *a;
1700 struct btree_iter *iter;
1701 struct bkey_alloc_unpacked u;
1702 bool parity = idx >= s.v->nr_blocks - s.v->nr_redundant;
1705 a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1710 s64 sectors = le16_to_cpu(s.v->sectors);
1715 u.dirty_sectors += sectors;
1716 u.data_type = u.dirty_sectors
1722 if (bch2_fs_inconsistent_on(u.stripe && u.stripe != s.k->p.offset, c,
1723 "bucket %llu:%llu gen %u: multiple stripes using same bucket (%u, %llu)",
1724 iter->pos.inode, iter->pos.offset, u.gen,
1725 u.stripe, s.k->p.offset)) {
1730 u.stripe = s.k->p.offset;
1731 u.stripe_redundancy = s.v->nr_redundant;
1734 u.stripe_redundancy = 0;
1737 bch2_alloc_pack(c, a, u);
1738 bch2_trans_update(trans, iter, &a->k, 0);
1740 bch2_trans_iter_put(trans, iter);
1744 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1745 struct bkey_s_c old, struct bkey_s_c new,
1748 struct bkey_s_c_stripe old_s = { NULL };
1749 struct bkey_s_c_stripe new_s = { NULL };
1750 struct bch_replicas_padded r;
1754 if (old.k->type == KEY_TYPE_stripe)
1755 old_s = bkey_s_c_to_stripe(old);
1756 if (new.k->type == KEY_TYPE_stripe)
1757 new_s = bkey_s_c_to_stripe(new);
1760 * If the pointers aren't changing, we don't need to do anything:
1762 if (new_s.k && old_s.k &&
1763 new_s.v->nr_blocks == old_s.v->nr_blocks &&
1764 new_s.v->nr_redundant == old_s.v->nr_redundant &&
1765 !memcmp(old_s.v->ptrs, new_s.v->ptrs,
1766 new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
1770 s64 sectors = le16_to_cpu(new_s.v->sectors);
1772 bch2_bkey_to_replicas(&r.e, new);
1773 update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
1775 for (i = 0; i < new_s.v->nr_blocks; i++) {
1776 ret = bch2_trans_mark_stripe_alloc_ref(trans, new_s,
1784 s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
1786 bch2_bkey_to_replicas(&r.e, old);
1787 update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
1789 for (i = 0; i < old_s.v->nr_blocks; i++) {
1790 ret = bch2_trans_mark_stripe_alloc_ref(trans, old_s,
1800 static __le64 *bkey_refcount(struct bkey_i *k)
1802 switch (k->k.type) {
1803 case KEY_TYPE_reflink_v:
1804 return &bkey_i_to_reflink_v(k)->v.refcount;
1805 case KEY_TYPE_indirect_inline_data:
1806 return &bkey_i_to_indirect_inline_data(k)->v.refcount;
1812 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1813 struct bkey_s_c_reflink_p p,
1814 u64 idx, unsigned sectors,
1817 struct bch_fs *c = trans->c;
1818 struct btree_iter *iter;
1824 ret = trans_get_key(trans, BTREE_ID_REFLINK,
1825 POS(0, idx), &iter, &k);
1829 if ((flags & BTREE_TRIGGER_OVERWRITE) &&
1830 (bkey_start_offset(k.k) < idx ||
1831 k.k->p.offset > idx + sectors))
1834 sectors = k.k->p.offset - idx;
1836 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1837 ret = PTR_ERR_OR_ZERO(n);
1841 bkey_reassemble(n, k);
1843 refcount = bkey_refcount(n);
1845 bch2_fs_inconsistent(c,
1846 "%llu:%llu len %u points to nonexistent indirect extent %llu",
1847 p.k->p.inode, p.k->p.offset, p.k->size, idx);
1852 le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
1855 n->k.type = KEY_TYPE_deleted;
1856 set_bkey_val_u64s(&n->k, 0);
1859 bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1860 BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1862 bch2_trans_update(trans, iter, n, 0);
1866 bch2_trans_iter_put(trans, iter);
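/*
 * Adjust the refcounts of the indirect extents a reflink pointer references,
 * iterating over however many indirect extents the pointed-to range covers;
 * an indirect extent whose refcount reaches zero is deleted.
 */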
1870 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1871 struct bkey_s_c_reflink_p p, unsigned offset,
1872 s64 sectors, unsigned flags)
1874 u64 idx = le64_to_cpu(p.v->idx) + offset;
1877 sectors = abs(sectors);
1878 BUG_ON(offset + sectors > p.k->size);
1881 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1886 sectors = max_t(s64, 0LL, sectors - ret);
1893 int bch2_trans_mark_key(struct btree_trans *trans,
1894 struct bkey_s_c old,
1895 struct bkey_s_c new,
1896 unsigned offset, s64 sectors, unsigned flags)
1898 struct bch_fs *c = trans->c;
1899 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1900 struct replicas_delta_list *d;
1902 BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1904 switch (k.k->type) {
1905 case KEY_TYPE_btree_ptr:
1906 case KEY_TYPE_btree_ptr_v2:
1907 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1908 ? c->opts.btree_node_size
1909 : -c->opts.btree_node_size;
1911 return bch2_trans_mark_extent(trans, k, offset, sectors,
1912 flags, BCH_DATA_btree);
1913 case KEY_TYPE_extent:
1914 case KEY_TYPE_reflink_v:
1915 return bch2_trans_mark_extent(trans, k, offset, sectors,
1916 flags, BCH_DATA_user);
1917 case KEY_TYPE_stripe:
1918 return bch2_trans_mark_stripe(trans, old, new, flags);
1919 case KEY_TYPE_inode: {
1920 int nr = (new.k->type == KEY_TYPE_inode) -
1921 (old.k->type == KEY_TYPE_inode);
1924 d = replicas_deltas_realloc(trans, 0);
1930 case KEY_TYPE_reservation: {
1931 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1933 d = replicas_deltas_realloc(trans, 0);
1935 sectors *= replicas;
1936 replicas = clamp_t(unsigned, replicas, 1,
1937 ARRAY_SIZE(d->persistent_reserved));
1939 d->persistent_reserved[replicas - 1] += sectors;
1942 case KEY_TYPE_reflink_p:
1943 return bch2_trans_mark_reflink_p(trans,
1944 bkey_s_c_to_reflink_p(k),
1945 offset, sectors, flags);
1951 int bch2_trans_mark_update(struct btree_trans *trans,
1952 struct btree_iter *iter,
1956 struct bkey_s_c old;
1959 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1962 if (!btree_node_type_needs_gc(iter->btree_id))
1965 if (!btree_node_type_is_extents(iter->btree_id)) {
1966 /* iterators should be uptodate, shouldn't get errors here: */
1967 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1968 old = bch2_btree_iter_peek_slot(iter);
1969 BUG_ON(bkey_err(old));
1971 struct bkey_cached *ck = (void *) iter->l[0].b;
1974 old = bkey_i_to_s_c(ck->k);
1977 if (old.k->type == new->k.type) {
1978 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1979 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1981 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1982 BTREE_TRIGGER_INSERT|flags) ?:
1983 bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1984 BTREE_TRIGGER_OVERWRITE|flags);
1987 struct btree *b = iter_l(iter)->b;
1988 struct btree_node_iter node_iter = iter_l(iter)->iter;
1989 struct bkey_packed *_old;
1990 struct bkey unpacked;
1992 EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1994 bkey_init(&unpacked);
1995 old = (struct bkey_s_c) { &unpacked, NULL };
1997 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
1999 BTREE_TRIGGER_INSERT);
2003 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
2004 unsigned flags = BTREE_TRIGGER_OVERWRITE;
2005 unsigned offset = 0;
2008 old = bkey_disassemble(b, _old, &unpacked);
2009 sectors = -((s64) old.k->size);
2011 flags |= BTREE_TRIGGER_OVERWRITE;
2013 if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
2016 switch (bch2_extent_overlap(&new->k, old.k)) {
2017 case BCH_EXTENT_OVERLAP_ALL:
2019 sectors = -((s64) old.k->size);
2021 case BCH_EXTENT_OVERLAP_BACK:
2022 offset = bkey_start_offset(&new->k) -
2023 bkey_start_offset(old.k);
2024 sectors = bkey_start_offset(&new->k) -
2027 case BCH_EXTENT_OVERLAP_FRONT:
2029 sectors = bkey_start_offset(old.k) -
2032 case BCH_EXTENT_OVERLAP_MIDDLE:
2033 offset = bkey_start_offset(&new->k) -
2034 bkey_start_offset(old.k);
2035 sectors = -((s64) new->k.size);
2036 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
2040 BUG_ON(sectors >= 0);
2042 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2043 offset, sectors, flags);
2047 bch2_btree_node_iter_advance(&node_iter, b);
2054 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2055 struct bch_dev *ca, size_t b,
2056 enum bch_data_type type,
2059 struct bch_fs *c = trans->c;
2060 struct btree_iter *iter;
2061 struct bkey_alloc_unpacked u;
2062 struct bkey_alloc_buf *a;
2063 struct bch_extent_ptr ptr = {
2065 .offset = bucket_to_sector(ca, b),
2069 a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
2073 if (u.data_type && u.data_type != type) {
2074 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
2075 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
2077 iter->pos.inode, iter->pos.offset, u.gen,
2078 bch2_data_types[u.data_type],
2079 bch2_data_types[type],
2080 bch2_data_types[type]);
2085 if ((unsigned) (u.dirty_sectors + sectors) > ca->mi.bucket_size) {
2086 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
2087 "bucket %llu:%llu gen %u data type %s sector count overflow: %u + %u > %u\n"
2089 iter->pos.inode, iter->pos.offset, u.gen,
2090 bch2_data_types[u.data_type ?: type],
2091 u.dirty_sectors, sectors, ca->mi.bucket_size,
2092 bch2_data_types[type]);
2097 if (u.data_type == type &&
2098 u.dirty_sectors == sectors)
2102 u.dirty_sectors = sectors;
2104 bch2_alloc_pack(c, a, u);
2105 bch2_trans_update(trans, iter, &a->k, 0);
2107 bch2_trans_iter_put(trans, iter);
2111 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2112 struct disk_reservation *res,
2113 struct bch_dev *ca, size_t b,
2114 enum bch_data_type type,
2117 return __bch2_trans_do(trans, res, NULL, 0,
		__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
2123 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
2124 struct disk_reservation *res,
2127 enum bch_data_type type,
2128 u64 *bucket, unsigned *bucket_sectors)
2133 u64 b = sector_to_bucket(ca, start);
2135 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
2138 if (*bucket_sectors) {
2139 ret = bch2_trans_mark_metadata_bucket(trans, res, ca,
2140 *bucket, type, *bucket_sectors);
2146 *bucket_sectors = 0;
2149 *bucket_sectors += sectors;
2151 } while (!ret && start < end);
2156 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
2157 struct disk_reservation *res,
2160 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
2162 unsigned i, bucket_sectors = 0;
2165 for (i = 0; i < layout->nr_superblocks; i++) {
2166 u64 offset = le64_to_cpu(layout->sb_offset[i]);
2168 if (offset == BCH_SB_SECTOR) {
2169 ret = bch2_trans_mark_metadata_sectors(trans, res, ca,
2171 BCH_DATA_sb, &bucket, &bucket_sectors);
2176 ret = bch2_trans_mark_metadata_sectors(trans, res, ca, offset,
2177 offset + (1 << layout->sb_max_size_bits),
2178 BCH_DATA_sb, &bucket, &bucket_sectors);
2183 if (bucket_sectors) {
2184 ret = bch2_trans_mark_metadata_bucket(trans, res, ca,
2185 bucket, BCH_DATA_sb, bucket_sectors);
2190 for (i = 0; i < ca->journal.nr; i++) {
2191 ret = bch2_trans_mark_metadata_bucket(trans, res, ca,
2192 ca->journal.buckets[i],
2193 BCH_DATA_journal, ca->mi.bucket_size);
2201 int bch2_trans_mark_dev_sb(struct bch_fs *c,
2202 struct disk_reservation *res,
2205 return bch2_trans_do(c, res, NULL, 0,
2206 __bch2_trans_mark_dev_sb(&trans, res, ca));
2209 /* Disk reservations: */
2211 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
2213 percpu_down_read(&c->mark_lock);
2214 this_cpu_sub(c->usage[0]->online_reserved,
2216 percpu_up_read(&c->mark_lock);
2221 #define SECTORS_CACHE 1024
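/*
 * Take a disk reservation: the fast path consumes from the per-cpu
 * sectors_available cache, refilled in SECTORS_CACHE chunks from the global
 * atomic counter; the slow path recomputes free space under
 * sectors_available_lock.
 */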
2223 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2224 u64 sectors, int flags)
2226 struct bch_fs_pcpu *pcpu;
2228 s64 sectors_available;
2231 percpu_down_read(&c->mark_lock);
2233 pcpu = this_cpu_ptr(c->pcpu);
2235 if (sectors <= pcpu->sectors_available)
2238 v = atomic64_read(&c->sectors_available);
2241 get = min((u64) sectors + SECTORS_CACHE, old);
2243 if (get < sectors) {
2247 } while ((v = atomic64_cmpxchg(&c->sectors_available,
2248 old, old - get)) != old);
2250 pcpu->sectors_available += get;
2253 pcpu->sectors_available -= sectors;
2254 this_cpu_add(c->usage[0]->online_reserved, sectors);
2255 res->sectors += sectors;
2258 percpu_up_read(&c->mark_lock);
2262 mutex_lock(&c->sectors_available_lock);
2264 percpu_u64_set(&c->pcpu->sectors_available, 0);
2265 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2267 if (sectors <= sectors_available ||
2268 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2269 atomic64_set(&c->sectors_available,
2270 max_t(s64, 0, sectors_available - sectors));
2271 this_cpu_add(c->usage[0]->online_reserved, sectors);
2272 res->sectors += sectors;
2275 atomic64_set(&c->sectors_available, sectors_available);
2279 mutex_unlock(&c->sectors_available_lock);
2280 percpu_up_read(&c->mark_lock);
2285 /* Startup/shutdown: */
2287 static void buckets_free_rcu(struct rcu_head *rcu)
2289 struct bucket_array *buckets =
2290 container_of(rcu, struct bucket_array, rcu);
2293 sizeof(struct bucket_array) +
2294 buckets->nbuckets * sizeof(struct bucket));
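/*
 * Allocate (or reallocate, when a device is resized) the in-memory bucket
 * array and the allocator fifos/heap for a device, copying the old contents
 * across and swapping the new arrays in under the gc, bucket and mark locks.
 */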
2297 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2299 struct bucket_array *buckets = NULL, *old_buckets = NULL;
2300 unsigned long *buckets_nouse = NULL;
2301 alloc_fifo free[RESERVE_NR];
2302 alloc_fifo free_inc;
2303 alloc_heap alloc_heap;
2305 size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2306 ca->mi.bucket_size / c->opts.btree_node_size);
2307 /* XXX: these should be tunable */
2308 size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
2309 size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 6);
2310 size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
2312 bool resize = ca->buckets[0] != NULL;
2316 memset(&free, 0, sizeof(free));
2317 memset(&free_inc, 0, sizeof(free_inc));
2318 memset(&alloc_heap, 0, sizeof(alloc_heap));
2320 if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
2321 nbuckets * sizeof(struct bucket),
2322 GFP_KERNEL|__GFP_ZERO)) ||
2323 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2324 sizeof(unsigned long),
2325 GFP_KERNEL|__GFP_ZERO)) ||
2326 !init_fifo(&free[RESERVE_MOVINGGC],
2327 copygc_reserve, GFP_KERNEL) ||
2328 !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2329 !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
2330 !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2333 buckets->first_bucket = ca->mi.first_bucket;
2334 buckets->nbuckets = nbuckets;
2336 bch2_copygc_stop(c);
2339 down_write(&c->gc_lock);
2340 down_write(&ca->bucket_lock);
2341 percpu_down_write(&c->mark_lock);
2344 old_buckets = bucket_array(ca);
2347 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2351 n * sizeof(struct bucket));
2352 memcpy(buckets_nouse,
2354 BITS_TO_LONGS(n) * sizeof(unsigned long));
2357 rcu_assign_pointer(ca->buckets[0], buckets);
2358 buckets = old_buckets;
2360 swap(ca->buckets_nouse, buckets_nouse);
2363 percpu_up_write(&c->mark_lock);
2364 up_write(&c->gc_lock);
2367 spin_lock(&c->freelist_lock);
2368 for (i = 0; i < RESERVE_NR; i++) {
2369 fifo_move(&free[i], &ca->free[i]);
2370 swap(ca->free[i], free[i]);
2372 fifo_move(&free_inc, &ca->free_inc);
2373 swap(ca->free_inc, free_inc);
2374 spin_unlock(&c->freelist_lock);
2376 /* with gc lock held, alloc_heap can't be in use: */
2377 swap(ca->alloc_heap, alloc_heap);
2379 nbuckets = ca->mi.nbuckets;
2382 up_write(&ca->bucket_lock);
2386 free_heap(&alloc_heap);
2387 free_fifo(&free_inc);
2388 for (i = 0; i < RESERVE_NR; i++)
2389 free_fifo(&free[i]);
2390 kvpfree(buckets_nouse,
2391 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2393 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2398 void bch2_dev_buckets_free(struct bch_dev *ca)
2402 free_heap(&ca->alloc_heap);
2403 free_fifo(&ca->free_inc);
2404 for (i = 0; i < RESERVE_NR; i++)
2405 free_fifo(&ca->free[i]);
2406 kvpfree(ca->buckets_nouse,
2407 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2408 kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2409 sizeof(struct bucket_array) +
2410 ca->mi.nbuckets * sizeof(struct bucket));
2412 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2413 free_percpu(ca->usage[i]);
2414 kfree(ca->usage_base);
2417 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2421 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2422 if (!ca->usage_base)
2425 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2426 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);