1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
8 * - free bucket: mark == 0
9 * The bucket contains no data and will not be read
11 * - allocator bucket: owned_by_allocator == 1
12 * The bucket is on a free list, or it is an open bucket
14 * - cached bucket: owned_by_allocator == 0 &&
15 * dirty_sectors == 0 &&
17 * The bucket contains data but may be safely discarded as there are
18 * enough replicas of the data on other cache devices, or it has been
19 * written back to the backing device
21 * - dirty bucket: owned_by_allocator == 0 &&
23 * The bucket contains data that we must not discard (either the only copy,
24 * or one of the 'main copies' for data requiring multiple replicas)
26 * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27 * This is a btree node, journal or gen/prio bucket
31 * bucket invalidated => bucket on freelist => open bucket =>
32 * [dirty bucket =>] cached bucket => bucket invalidated => ...
34 * Note that cache promotion can skip the dirty bucket step, as data
35 * is copied from a deeper tier to a shallower tier, onto a cached bucket.
37 * Note also that a cached bucket can spontaneously become dirty --
40 * Only a traversal of the key space can determine whether a bucket is
41 * truly dirty or cached.
45 * - free => allocator: bucket was invalidated
46 * - cached => allocator: bucket was invalidated
48 * - allocator => dirty: open bucket was filled up
49 * - allocator => cached: open bucket was filled up
50 * - allocator => metadata: metadata was allocated
52 * - dirty => cached: dirty sectors were copied to a deeper tier
53 * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54 * - cached => free: cached sectors were overwritten
56 * - metadata => free: metadata was freed
59 * - cached => dirty: a device was removed so formerly replicated data
60 * is no longer sufficiently replicated
61 * - free => cached: cannot happen
62 * - free => dirty: cannot happen
63 * - free => metadata: cannot happen
67 #include "alloc_background.h"
70 #include "btree_update.h"
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
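/*
 * Illustrative sketch only (not part of bcachefs): the bucket states
 * described in the header comment above, expressed as a classifier over the
 * bucket_mark fields this file manipulates. The real code answers these
 * questions with helpers such as is_available_bucket() and bucket_type();
 * the "example_" names below are hypothetical.
 */
enum example_bucket_state {
	EXAMPLE_BUCKET_FREE,
	EXAMPLE_BUCKET_ALLOCATOR,
	EXAMPLE_BUCKET_CACHED,
	EXAMPLE_BUCKET_DIRTY,
	EXAMPLE_BUCKET_METADATA,
};

static inline enum example_bucket_state
example_classify_bucket(struct bucket_mark m)
{
	if (m.owned_by_allocator)
		return EXAMPLE_BUCKET_ALLOCATOR;
	if (m.data_type == BCH_DATA_sb ||
	    m.data_type == BCH_DATA_journal ||
	    m.data_type == BCH_DATA_btree)
		return EXAMPLE_BUCKET_METADATA;
	if (m.dirty_sectors)
		return EXAMPLE_BUCKET_DIRTY;
	if (m.cached_sectors)
		return EXAMPLE_BUCKET_CACHED;
	return EXAMPLE_BUCKET_FREE;
}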
81 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
84 void bch2_bucket_seq_cleanup(struct bch_fs *c)
86 u64 journal_seq = atomic64_read(&c->journal.seq);
87 u16 last_seq_ondisk = c->journal.last_seq_ondisk;
89 struct bucket_array *buckets;
94 if (journal_seq - c->last_bucket_seq_cleanup <
95 (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
98 c->last_bucket_seq_cleanup = journal_seq;
100 for_each_member_device(ca, c, i) {
101 down_read(&ca->bucket_lock);
102 buckets = bucket_array(ca);
104 for_each_bucket(g, buckets) {
105 bucket_cmpxchg(g, m, ({
106 if (!m.journal_seq_valid ||
107 bucket_needs_journal_commit(m, last_seq_ondisk))
110 m.journal_seq_valid = 0;
113 up_read(&ca->bucket_lock);
117 void bch2_fs_usage_initialize(struct bch_fs *c)
119 struct bch_fs_usage *usage;
122 percpu_down_write(&c->mark_lock);
123 usage = c->usage_base;
125 bch2_fs_usage_acc_to_base(c, 0);
126 bch2_fs_usage_acc_to_base(c, 1);
128 for (i = 0; i < BCH_REPLICAS_MAX; i++)
129 usage->reserved += usage->persistent_reserved[i];
131 for (i = 0; i < c->replicas.nr; i++) {
132 struct bch_replicas_entry *e =
133 cpu_replicas_entry(&c->replicas, i);
135 switch (e->data_type) {
137 usage->btree += usage->replicas[i];
140 usage->data += usage->replicas[i];
142 case BCH_DATA_cached:
143 usage->cached += usage->replicas[i];
148 percpu_up_write(&c->mark_lock);
151 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
153 if (fs_usage == c->usage_scratch)
154 mutex_unlock(&c->usage_scratch_lock);
159 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
161 struct bch_fs_usage *ret;
162 unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
164 ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
168 if (mutex_trylock(&c->usage_scratch_lock))
171 ret = kzalloc(bytes, GFP_NOFS);
175 mutex_lock(&c->usage_scratch_lock);
177 ret = c->usage_scratch;
178 memset(ret, 0, bytes);
182 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
184 struct bch_dev_usage ret;
186 memset(&ret, 0, sizeof(ret));
187 acc_u64s_percpu((u64 *) &ret,
188 (u64 __percpu *) ca->usage[0],
189 sizeof(ret) / sizeof(u64));
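/*
 * Filesystem usage is kept in three places: a base copy (c->usage_base) plus
 * two percpu accumulators, c->usage[0] and c->usage[1], selected by
 * journal_seq & 1. Readers sum the base copy plus both accumulators under
 * the usage_lock seqcount; bch2_fs_usage_acc_to_base() periodically folds an
 * accumulator back into the base copy.
 */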
194 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
195 unsigned journal_seq,
198 return this_cpu_ptr(gc
200 : c->usage[journal_seq & 1]);
203 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
205 ssize_t offset = v - (u64 *) c->usage_base;
209 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
210 percpu_rwsem_assert_held(&c->mark_lock);
213 seq = read_seqcount_begin(&c->usage_lock);
215 percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
216 percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
217 } while (read_seqcount_retry(&c->usage_lock, seq));
222 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
224 struct bch_fs_usage *ret;
225 unsigned seq, v, u64s = fs_usage_u64s(c);
227 ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
231 percpu_down_read(&c->mark_lock);
233 v = fs_usage_u64s(c);
234 if (unlikely(u64s != v)) {
236 percpu_up_read(&c->mark_lock);
242 seq = read_seqcount_begin(&c->usage_lock);
243 memcpy(ret, c->usage_base, u64s * sizeof(u64));
244 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
245 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
246 } while (read_seqcount_retry(&c->usage_lock, seq));
251 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
253 unsigned u64s = fs_usage_u64s(c);
258 write_seqcount_begin(&c->usage_lock);
260 acc_u64s_percpu((u64 *) c->usage_base,
261 (u64 __percpu *) c->usage[idx], u64s);
262 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
264 write_seqcount_end(&c->usage_lock);
268 void bch2_fs_usage_to_text(struct printbuf *out,
270 struct bch_fs_usage *fs_usage)
274 pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
276 pr_buf(out, "hidden:\t\t\t\t%llu\n",
278 pr_buf(out, "data:\t\t\t\t%llu\n",
280 pr_buf(out, "cached:\t\t\t\t%llu\n",
282 pr_buf(out, "reserved:\t\t\t%llu\n",
284 pr_buf(out, "nr_inodes:\t\t\t%llu\n",
285 fs_usage->nr_inodes);
286 pr_buf(out, "online reserved:\t\t%llu\n",
287 fs_usage->online_reserved);
290 i < ARRAY_SIZE(fs_usage->persistent_reserved);
292 pr_buf(out, "%u replicas:\n", i + 1);
293 pr_buf(out, "\treserved:\t\t%llu\n",
294 fs_usage->persistent_reserved[i]);
297 for (i = 0; i < c->replicas.nr; i++) {
298 struct bch_replicas_entry *e =
299 cpu_replicas_entry(&c->replicas, i);
302 bch2_replicas_entry_to_text(out, e);
303 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
307 #define RESERVE_FACTOR 6
309 static u64 reserve_factor(u64 r)
311 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
314 static u64 avail_factor(u64 r)
316 return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
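/*
 * reserve_factor() inflates a sector count by roughly 1/2^RESERVE_FACTOR
 * (~1.6%) to leave headroom, and avail_factor() is its approximate inverse:
 * e.g. reserve_factor(1000) = 1000 + round_up(1000, 64)/64 = 1016, and
 * avail_factor(1016) = 1016 * 64 / 65 = 1000 (integer division).
 */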
319 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
321 return min(fs_usage->hidden +
324 reserve_factor(fs_usage->reserved +
325 fs_usage->online_reserved),
329 static struct bch_fs_usage_short
330 __bch2_fs_usage_read_short(struct bch_fs *c)
332 struct bch_fs_usage_short ret;
335 ret.capacity = c->capacity -
336 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
338 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
339 bch2_fs_usage_read_one(c, &c->usage_base->btree);
340 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
341 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
343 ret.used = min(ret.capacity, data + reserve_factor(reserved));
344 ret.free = ret.capacity - ret.used;
346 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
351 struct bch_fs_usage_short
352 bch2_fs_usage_read_short(struct bch_fs *c)
354 struct bch_fs_usage_short ret;
356 percpu_down_read(&c->mark_lock);
357 ret = __bch2_fs_usage_read_short(c);
358 percpu_up_read(&c->mark_lock);
363 static inline int is_unavailable_bucket(struct bucket_mark m)
365 return !is_available_bucket(m);
368 static inline int is_fragmented_bucket(struct bucket_mark m,
371 if (!m.owned_by_allocator &&
372 m.data_type == BCH_DATA_user &&
373 bucket_sectors_used(m))
374 return max_t(int, 0, (int) ca->mi.bucket_size -
375 bucket_sectors_used(m));
379 static inline int bucket_stripe_sectors(struct bucket_mark m)
381 return m.stripe ? m.dirty_sectors : 0;
384 static inline enum bch_data_type bucket_type(struct bucket_mark m)
386 return m.cached_sectors && !m.dirty_sectors
391 static bool bucket_became_unavailable(struct bucket_mark old,
392 struct bucket_mark new)
394 return is_available_bucket(old) &&
395 !is_available_bucket(new);
398 int bch2_fs_usage_apply(struct bch_fs *c,
399 struct bch_fs_usage *fs_usage,
400 struct disk_reservation *disk_res,
401 unsigned journal_seq)
403 s64 added = fs_usage->data + fs_usage->reserved;
404 s64 should_not_have_added;
407 percpu_rwsem_assert_held(&c->mark_lock);
410 * Not allowed to reduce sectors_available except by getting a
413 should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
414 if (WARN_ONCE(should_not_have_added > 0,
415 "disk usage increased by %lli without a reservation",
416 should_not_have_added)) {
417 atomic64_sub(should_not_have_added, &c->sectors_available);
418 added -= should_not_have_added;
423 disk_res->sectors -= added;
424 fs_usage->online_reserved -= added;
428 acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
429 (u64 *) fs_usage, fs_usage_u64s(c));
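/*
 * Illustrative sketch only (not part of bcachefs): the reservation flow that
 * the WARN in bch2_fs_usage_apply() polices. A writer reserves sectors up
 * front with bch2_disk_reservation_add(); the usage delta it later applies
 * is charged against that reservation instead of shrinking
 * c->sectors_available directly. Locking and error paths are simplified;
 * the "example_" name is hypothetical.
 */
static int example_reserve_then_apply(struct bch_fs *c, unsigned sectors,
				      unsigned journal_seq)
{
	struct disk_reservation res = { .sectors = 0 };
	struct bch_fs_usage *fs_usage;
	int ret;

	ret = bch2_disk_reservation_add(c, &res, sectors, 0);
	if (ret)
		return ret;

	percpu_down_read(&c->mark_lock);
	fs_usage = bch2_fs_usage_scratch_get(c);

	/* ... mark keys here, accumulating deltas into fs_usage ... */

	ret = bch2_fs_usage_apply(c, fs_usage, &res, journal_seq);

	bch2_fs_usage_scratch_put(c, fs_usage);
	percpu_up_read(&c->mark_lock);

	/* any unused part of the reservation would be released by the caller */
	return ret;
}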
435 static inline void account_bucket(struct bch_fs_usage *fs_usage,
436 struct bch_dev_usage *dev_usage,
437 enum bch_data_type type,
440 if (type == BCH_DATA_sb || type == BCH_DATA_journal)
441 fs_usage->hidden += size;
443 dev_usage->buckets[type] += nr;
446 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
447 struct bch_fs_usage *fs_usage,
448 struct bucket_mark old, struct bucket_mark new,
451 struct bch_dev_usage *u;
453 percpu_rwsem_assert_held(&c->mark_lock);
456 u = this_cpu_ptr(ca->usage[gc]);
458 if (bucket_type(old))
459 account_bucket(fs_usage, u, bucket_type(old),
460 -1, -ca->mi.bucket_size);
462 if (bucket_type(new))
463 account_bucket(fs_usage, u, bucket_type(new),
464 1, ca->mi.bucket_size);
467 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
468 u->buckets_unavailable +=
469 is_unavailable_bucket(new) - is_unavailable_bucket(old);
471 u->buckets_ec += (int) new.stripe - (int) old.stripe;
472 u->sectors_ec += bucket_stripe_sectors(new) -
473 bucket_stripe_sectors(old);
475 u->sectors[old.data_type] -= old.dirty_sectors;
476 u->sectors[new.data_type] += new.dirty_sectors;
477 u->sectors[BCH_DATA_cached] +=
478 (int) new.cached_sectors - (int) old.cached_sectors;
479 u->sectors_fragmented +=
480 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
483 if (!is_available_bucket(old) && is_available_bucket(new))
484 bch2_wake_allocator(ca);
488 void bch2_dev_usage_from_buckets(struct bch_fs *c)
491 struct bucket_mark old = { .v.counter = 0 };
492 struct bucket_array *buckets;
497 c->usage_base->hidden = 0;
499 for_each_member_device(ca, c, i) {
500 for_each_possible_cpu(cpu)
501 memset(per_cpu_ptr(ca->usage[0], cpu), 0,
502 sizeof(*ca->usage[0]));
504 buckets = bucket_array(ca);
506 for_each_bucket(g, buckets)
507 bch2_dev_usage_update(c, ca, c->usage_base,
508 old, g->mark, false);
512 static inline int update_replicas(struct bch_fs *c,
513 struct bch_fs_usage *fs_usage,
514 struct bch_replicas_entry *r,
517 int idx = bch2_replicas_entry_idx(c, r);
525 switch (r->data_type) {
527 fs_usage->btree += sectors;
530 fs_usage->data += sectors;
532 case BCH_DATA_cached:
533 fs_usage->cached += sectors;
536 fs_usage->replicas[idx] += sectors;
540 static inline void update_cached_sectors(struct bch_fs *c,
541 struct bch_fs_usage *fs_usage,
542 unsigned dev, s64 sectors)
544 struct bch_replicas_padded r;
546 bch2_replicas_entry_cached(&r.e, dev);
548 update_replicas(c, fs_usage, &r.e, sectors);
551 static struct replicas_delta_list *
552 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
554 struct replicas_delta_list *d = trans->fs_usage_deltas;
555 unsigned new_size = d ? (d->size + more) * 2 : 128;
557 if (!d || d->used + more > d->size) {
558 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
562 trans->fs_usage_deltas = d;
567 static inline void update_replicas_list(struct btree_trans *trans,
568 struct bch_replicas_entry *r,
571 struct replicas_delta_list *d;
572 struct replicas_delta *n;
578 b = replicas_entry_bytes(r) + 8;
579 d = replicas_deltas_realloc(trans, b);
581 n = (void *) d->d + d->used;
583 memcpy(&n->r, r, replicas_entry_bytes(r));
587 static inline void update_cached_sectors_list(struct btree_trans *trans,
588 unsigned dev, s64 sectors)
590 struct bch_replicas_padded r;
592 bch2_replicas_entry_cached(&r.e, dev);
594 update_replicas_list(trans, &r.e, sectors);
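/*
 * Pending replicas changes for a transaction are accumulated in a flat,
 * variable-length list: each element is a replicas_delta, an 8 byte delta
 * followed by a variable-length bch_replicas_entry, hence the
 * replicas_entry_bytes(r) + 8 stride used by update_replicas_list() above
 * and replicas_delta_next() below.
 */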
597 static inline struct replicas_delta *
598 replicas_delta_next(struct replicas_delta *d)
600 return (void *) d + replicas_entry_bytes(&d->r) + 8;
603 int bch2_replicas_delta_list_apply(struct bch_fs *c,
604 struct bch_fs_usage *fs_usage,
605 struct replicas_delta_list *r)
607 struct replicas_delta *d = r->d;
608 struct replicas_delta *top = (void *) r->d + r->used;
611 for (d = r->d; d != top; d = replicas_delta_next(d))
612 if (update_replicas(c, fs_usage, &d->r, d->delta)) {
620 fs_usage->nr_inodes += r->nr_inodes;
622 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
623 fs_usage->reserved += r->persistent_reserved[i];
624 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
629 for (d = r->d; d != top; d = replicas_delta_next(d))
630 update_replicas(c, fs_usage, &d->r, -d->delta);
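/*
 * Marking is done against two sets of counters: the normal ones (gc=0) and
 * GC's (gc=1). do_mark_fn() invokes the mark function for whichever set the
 * BTREE_TRIGGER_GC flag selects, and additionally replays a normal update
 * into the GC copy when gc_visited(c, pos) says GC has already processed
 * this position, so a concurrent GC doesn't miss the update.
 */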
634 #define do_mark_fn(fn, c, pos, flags, ...) \
638 percpu_rwsem_assert_held(&c->mark_lock); \
640 for (gc = 0; gc < 2 && !ret; gc++) \
641 if (!gc == !(flags & BTREE_TRIGGER_GC) || \
642 (gc && gc_visited(c, pos))) \
643 ret = fn(c, __VA_ARGS__, gc); \
647 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
648 size_t b, struct bucket_mark *ret,
651 struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
652 struct bucket *g = __bucket(ca, b, gc);
653 struct bucket_mark old, new;
655 old = bucket_cmpxchg(g, new, ({
656 BUG_ON(!is_available_bucket(new));
658 new.owned_by_allocator = true;
660 new.cached_sectors = 0;
661 new.dirty_sectors = 0;
665 bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
667 if (old.cached_sectors)
668 update_cached_sectors(c, fs_usage, ca->dev_idx,
669 -((s64) old.cached_sectors));
676 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
677 size_t b, struct bucket_mark *old)
679 do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
682 if (!old->owned_by_allocator && old->cached_sectors)
683 trace_invalidate(ca, bucket_to_sector(ca, b),
684 old->cached_sectors);
687 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
688 size_t b, bool owned_by_allocator,
691 struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
692 struct bucket *g = __bucket(ca, b, gc);
693 struct bucket_mark old, new;
695 old = bucket_cmpxchg(g, new, ({
696 new.owned_by_allocator = owned_by_allocator;
699 bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
702 !owned_by_allocator && !old.owned_by_allocator);
707 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
708 size_t b, bool owned_by_allocator,
709 struct gc_pos pos, unsigned flags)
713 do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
714 ca, b, owned_by_allocator);
719 static int bch2_mark_alloc(struct bch_fs *c,
720 struct bkey_s_c old, struct bkey_s_c new,
721 struct bch_fs_usage *fs_usage,
722 u64 journal_seq, unsigned flags)
724 bool gc = flags & BTREE_TRIGGER_GC;
725 struct bkey_alloc_unpacked u;
728 struct bucket_mark old_m, m;
730 /* We don't do anything for deletions - do we?: */
731 if (new.k->type != KEY_TYPE_alloc)
735 * alloc btree is read in by bch2_alloc_read, not gc:
737 if ((flags & BTREE_TRIGGER_GC) &&
738 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
741 ca = bch_dev_bkey_exists(c, new.k->p.inode);
743 if (new.k->p.offset >= ca->mi.nbuckets)
746 g = __bucket(ca, new.k->p.offset, gc);
747 u = bch2_alloc_unpack(new);
749 old_m = bucket_cmpxchg(g, m, ({
751 m.data_type = u.data_type;
752 m.dirty_sectors = u.dirty_sectors;
753 m.cached_sectors = u.cached_sectors;
756 m.journal_seq_valid = 1;
757 m.journal_seq = journal_seq;
761 bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);
763 g->io_time[READ] = u.read_time;
764 g->io_time[WRITE] = u.write_time;
765 g->oldest_gen = u.oldest_gen;
769 * need to know if we're getting called from the invalidate path or
773 if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
774 old_m.cached_sectors) {
775 update_cached_sectors(c, fs_usage, ca->dev_idx,
776 -old_m.cached_sectors);
777 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
778 old_m.cached_sectors);
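/*
 * checked_add() adds to a 16 bit sector counter and evaluates to whether the
 * sum overflowed U16_MAX; callers below treat an overflow as a filesystem
 * inconsistency rather than letting the counter wrap.
 */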
784 #define checked_add(a, b) \
786 unsigned _res = (unsigned) (a) + (b); \
787 bool overflow = _res > U16_MAX; \
794 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
795 size_t b, enum bch_data_type data_type,
796 unsigned sectors, bool gc)
798 struct bucket *g = __bucket(ca, b, gc);
799 struct bucket_mark old, new;
802 BUG_ON(data_type != BCH_DATA_sb &&
803 data_type != BCH_DATA_journal);
805 old = bucket_cmpxchg(g, new, ({
806 new.data_type = data_type;
807 overflow = checked_add(new.dirty_sectors, sectors);
810 bch2_fs_inconsistent_on(old.data_type &&
811 old.data_type != data_type, c,
812 "different types of data in same bucket: %s, %s",
813 bch2_data_types[old.data_type],
814 bch2_data_types[data_type]);
816 bch2_fs_inconsistent_on(overflow, c,
817 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
818 ca->dev_idx, b, new.gen,
819 bch2_data_types[old.data_type ?: data_type],
820 old.dirty_sectors, sectors);
823 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
829 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
830 size_t b, enum bch_data_type type,
831 unsigned sectors, struct gc_pos pos,
834 BUG_ON(type != BCH_DATA_sb &&
835 type != BCH_DATA_journal);
840 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
841 ca, b, type, sectors);
843 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
849 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
851 return DIV_ROUND_UP(sectors * n, d);
854 static s64 __ptr_disk_sectors_delta(unsigned old_size,
855 unsigned offset, s64 delta,
857 unsigned n, unsigned d)
861 if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
862 BUG_ON(offset + -delta > old_size);
864 return -disk_sectors_scaled(n, d, old_size) +
865 disk_sectors_scaled(n, d, offset) +
866 disk_sectors_scaled(n, d, old_size - offset + delta);
867 } else if (flags & BTREE_TRIGGER_OVERWRITE) {
868 BUG_ON(offset + -delta > old_size);
870 return -disk_sectors_scaled(n, d, old_size) +
871 disk_sectors_scaled(n, d, old_size + delta);
873 return disk_sectors_scaled(n, d, delta);
877 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
878 unsigned offset, s64 delta,
881 return __ptr_disk_sectors_delta(p.crc.live_size,
882 offset, delta, flags,
883 p.crc.compressed_size,
884 p.crc.uncompressed_size);
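/*
 * Extent sizes are tracked in live (uncompressed) sectors, but bucket and
 * replicas accounting wants on-disk sectors, so deltas are scaled by
 * compressed_size/uncompressed_size. E.g. a 128 sector extent compressed to
 * 32 sectors on disk, overwritten so that 64 live sectors are dropped
 * (delta = -64), changes disk usage by -scaled(128) + scaled(64) =
 * -32 + 16 = -16 sectors.
 */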
887 static void bucket_set_stripe(struct bch_fs *c,
888 const struct bch_extent_ptr *ptr,
889 struct bch_fs_usage *fs_usage,
894 bool gc = flags & BTREE_TRIGGER_GC;
895 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
896 struct bucket *g = PTR_BUCKET(ca, ptr, gc);
897 struct bucket_mark new, old;
899 old = bucket_cmpxchg(g, new, ({
900 new.stripe = enabled;
902 new.journal_seq_valid = 1;
903 new.journal_seq = journal_seq;
907 bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
910 * XXX write repair code for these, flag stripe as possibly bad
912 if (old.gen != ptr->gen)
913 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
914 "stripe with stale pointer");
917 * We'd like to check for these, but these checks don't work
920 if (old.stripe && enabled)
921 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
922 "multiple stripes using same bucket");
924 if (!old.stripe && !enabled)
925 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
926 "deleting stripe but bucket not marked as stripe bucket");
930 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
931 struct extent_ptr_decoded p,
932 s64 sectors, enum bch_data_type ptr_data_type,
933 u8 bucket_gen, u8 *bucket_data_type,
934 u16 *dirty_sectors, u16 *cached_sectors)
936 u16 *dst_sectors = !p.ptr.cached
939 u16 orig_sectors = *dst_sectors;
942 if (gen_after(p.ptr.gen, bucket_gen)) {
943 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
944 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
946 p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
948 bch2_data_types[*bucket_data_type ?: ptr_data_type],
950 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
954 if (gen_cmp(bucket_gen, p.ptr.gen) > BUCKET_GC_GEN_MAX) {
955 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
956 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
958 p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
960 bch2_data_types[*bucket_data_type ?: ptr_data_type],
962 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
966 if (bucket_gen != p.ptr.gen && !p.ptr.cached) {
967 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
968 "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
970 p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
972 bch2_data_types[*bucket_data_type ?: ptr_data_type],
974 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
978 if (bucket_gen != p.ptr.gen)
981 if (*bucket_data_type && *bucket_data_type != ptr_data_type) {
982 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
983 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
985 p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
987 bch2_data_types[*bucket_data_type],
988 bch2_data_types[ptr_data_type],
989 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
993 if (checked_add(*dst_sectors, sectors)) {
994 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
995 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
997 p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
999 bch2_data_types[*bucket_data_type ?: ptr_data_type],
1000 orig_sectors, sectors,
1001 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
1005 *bucket_data_type = *dirty_sectors || *cached_sectors
1006 ? ptr_data_type : 0;
1010 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1011 struct extent_ptr_decoded p,
1012 s64 sectors, enum bch_data_type data_type,
1013 struct bch_fs_usage *fs_usage,
1014 u64 journal_seq, unsigned flags)
1016 bool gc = flags & BTREE_TRIGGER_GC;
1017 struct bucket_mark old, new;
1018 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1019 struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
1020 u8 bucket_data_type;
1024 v = atomic64_read(&g->_mark.v);
1026 new.v.counter = old.v.counter = v;
1027 bucket_data_type = new.data_type;
1029 ret = __mark_pointer(c, k, p, sectors, data_type, new.gen,
1032 &new.cached_sectors);
1036 new.data_type = bucket_data_type;
1039 new.journal_seq_valid = 1;
1040 new.journal_seq = journal_seq;
1043 if (flags & BTREE_TRIGGER_NOATOMIC) {
1047 } while ((v = atomic64_cmpxchg(&g->_mark.v,
1049 new.v.counter)) != old.v.counter);
1051 bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
1053 BUG_ON(!gc && bucket_became_unavailable(old, new));
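/*
 * Marking a pointer into an erasure coded stripe: the in-memory stripe's
 * per-block sector counts are updated, and if the number of non-empty
 * blocks changed, the stripe is repositioned in the ec stripes heap.
 */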
1058 static int bch2_mark_stripe_ptr(struct bch_fs *c,
1059 struct bch_extent_stripe_ptr p,
1060 enum bch_data_type data_type,
1061 struct bch_fs_usage *fs_usage,
1062 s64 sectors, unsigned flags,
1063 struct bch_replicas_padded *r,
1065 unsigned *nr_parity)
1067 bool gc = flags & BTREE_TRIGGER_GC;
1069 unsigned i, blocks_nonempty = 0;
1071 m = genradix_ptr(&c->stripes[gc], p.idx);
1073 spin_lock(&c->ec_stripes_heap_lock);
1075 if (!m || !m->alive) {
1076 spin_unlock(&c->ec_stripes_heap_lock);
1077 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1082 BUG_ON(m->r.e.data_type != data_type);
1084 *nr_data = m->nr_blocks - m->nr_redundant;
1085 *nr_parity = m->nr_redundant;
1088 m->block_sectors[p.block] += sectors;
1090 for (i = 0; i < m->nr_blocks; i++)
1091 blocks_nonempty += m->block_sectors[i] != 0;
1093 if (m->blocks_nonempty != blocks_nonempty) {
1094 m->blocks_nonempty = blocks_nonempty;
1096 bch2_stripes_heap_update(c, m, p.idx);
1099 spin_unlock(&c->ec_stripes_heap_lock);
1104 static int bch2_mark_extent(struct bch_fs *c,
1105 struct bkey_s_c old, struct bkey_s_c new,
1106 unsigned offset, s64 sectors,
1107 enum bch_data_type data_type,
1108 struct bch_fs_usage *fs_usage,
1109 unsigned journal_seq, unsigned flags)
1111 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1112 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1113 const union bch_extent_entry *entry;
1114 struct extent_ptr_decoded p;
1115 struct bch_replicas_padded r;
1116 s64 dirty_sectors = 0;
1120 r.e.data_type = data_type;
1122 r.e.nr_required = 1;
1126 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1127 s64 disk_sectors = data_type == BCH_DATA_btree
1129 : ptr_disk_sectors_delta(p, offset, sectors, flags);
1131 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
1132 fs_usage, journal_seq, flags);
1140 update_cached_sectors(c, fs_usage, p.ptr.dev,
1142 } else if (!p.has_ec) {
1143 dirty_sectors += disk_sectors;
1144 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1146 struct bch_replicas_padded ec_r;
1147 unsigned nr_data, nr_parity;
1150 ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1151 fs_usage, disk_sectors, flags,
1152 &ec_r, &nr_data, &nr_parity);
1157 __ptr_disk_sectors_delta(p.crc.live_size,
1158 offset, sectors, flags,
1159 p.crc.compressed_size * nr_parity,
1160 p.crc.uncompressed_size * nr_data);
1162 update_replicas(c, fs_usage, &ec_r.e,
1163 disk_sectors + parity_sectors);
1166 * There may be other dirty pointers in this extent, but
1167 * if so they're not required for mounting if we have an
1168 * erasure coded pointer in this extent:
1170 r.e.nr_required = 0;
1175 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1180 static int bch2_mark_stripe(struct bch_fs *c,
1181 struct bkey_s_c old, struct bkey_s_c new,
1182 struct bch_fs_usage *fs_usage,
1183 u64 journal_seq, unsigned flags)
1185 bool gc = flags & BTREE_TRIGGER_GC;
1186 size_t idx = new.k->p.offset;
1187 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1188 ? bkey_s_c_to_stripe(old).v : NULL;
1189 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1190 ? bkey_s_c_to_stripe(new).v : NULL;
1191 struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1194 if (!m || (old_s && !m->alive)) {
1195 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1202 for (i = 0; i < old_s->nr_blocks; i++)
1203 bucket_set_stripe(c, old_s->ptrs + i, fs_usage,
1204 journal_seq, flags, false);
1206 if (!gc && m->on_heap) {
1207 spin_lock(&c->ec_stripes_heap_lock);
1208 bch2_stripes_heap_del(c, m, idx);
1209 spin_unlock(&c->ec_stripes_heap_lock);
1212 memset(m, 0, sizeof(*m));
1214 BUG_ON(old_s && new_s->nr_blocks != old_s->nr_blocks);
1215 BUG_ON(old_s && new_s->nr_redundant != old_s->nr_redundant);
1217 for (i = 0; i < new_s->nr_blocks; i++) {
1219 memcmp(new_s->ptrs + i,
1221 sizeof(struct bch_extent_ptr))) {
1224 bucket_set_stripe(c, old_s->ptrs + i, fs_usage,
1225 journal_seq, flags, false);
1226 bucket_set_stripe(c, new_s->ptrs + i, fs_usage,
1227 journal_seq, flags, true);
1232 m->sectors = le16_to_cpu(new_s->sectors);
1233 m->algorithm = new_s->algorithm;
1234 m->nr_blocks = new_s->nr_blocks;
1235 m->nr_redundant = new_s->nr_redundant;
1237 bch2_bkey_to_replicas(&m->r.e, new);
1239 /* gc recalculates these fields: */
1240 if (!(flags & BTREE_TRIGGER_GC)) {
1241 m->blocks_nonempty = 0;
1243 for (i = 0; i < new_s->nr_blocks; i++) {
1244 m->block_sectors[i] =
1245 stripe_blockcount_get(new_s, i);
1246 m->blocks_nonempty += !!m->block_sectors[i];
1251 spin_lock(&c->ec_stripes_heap_lock);
1252 bch2_stripes_heap_update(c, m, idx);
1253 spin_unlock(&c->ec_stripes_heap_lock);
1260 static int bch2_mark_key_locked(struct bch_fs *c,
1261 struct bkey_s_c old,
1262 struct bkey_s_c new,
1263 unsigned offset, s64 sectors,
1264 struct bch_fs_usage *fs_usage,
1265 u64 journal_seq, unsigned flags)
1267 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1270 BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1274 if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1275 fs_usage = fs_usage_ptr(c, journal_seq,
1276 flags & BTREE_TRIGGER_GC);
1278 switch (k.k->type) {
1279 case KEY_TYPE_alloc:
1280 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1282 case KEY_TYPE_btree_ptr:
1283 case KEY_TYPE_btree_ptr_v2:
1284 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1285 ? c->opts.btree_node_size
1286 : -c->opts.btree_node_size;
1288 ret = bch2_mark_extent(c, old, new, offset, sectors,
1289 BCH_DATA_btree, fs_usage, journal_seq, flags);
1291 case KEY_TYPE_extent:
1292 case KEY_TYPE_reflink_v:
1293 ret = bch2_mark_extent(c, old, new, offset, sectors,
1294 BCH_DATA_user, fs_usage, journal_seq, flags);
1296 case KEY_TYPE_stripe:
1297 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1299 case KEY_TYPE_inode:
1300 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1301 fs_usage->nr_inodes++;
1303 fs_usage->nr_inodes--;
1305 case KEY_TYPE_reservation: {
1306 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1308 sectors *= replicas;
1309 replicas = clamp_t(unsigned, replicas, 1,
1310 ARRAY_SIZE(fs_usage->persistent_reserved));
1312 fs_usage->reserved += sectors;
1313 fs_usage->persistent_reserved[replicas - 1] += sectors;
1323 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1324 unsigned offset, s64 sectors,
1325 struct bch_fs_usage *fs_usage,
1326 u64 journal_seq, unsigned flags)
1328 struct bkey deleted;
1329 struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1332 bkey_init(&deleted);
1334 percpu_down_read(&c->mark_lock);
1335 ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1336 fs_usage, journal_seq,
1337 BTREE_TRIGGER_INSERT|flags);
1338 percpu_up_read(&c->mark_lock);
1343 int bch2_mark_update(struct btree_trans *trans,
1344 struct btree_iter *iter,
1346 struct bch_fs_usage *fs_usage,
1349 struct bch_fs *c = trans->c;
1350 struct btree *b = iter_l(iter)->b;
1351 struct btree_node_iter node_iter = iter_l(iter)->iter;
1352 struct bkey_packed *_old;
1353 struct bkey_s_c old;
1354 struct bkey unpacked;
1357 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1360 if (!btree_node_type_needs_gc(iter->btree_id))
1363 bkey_init(&unpacked);
1364 old = (struct bkey_s_c) { &unpacked, NULL };
1366 if (!btree_node_type_is_extents(iter->btree_id)) {
1367 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1368 _old = bch2_btree_node_iter_peek(&node_iter, b);
1370 old = bkey_disassemble(b, _old, &unpacked);
1372 struct bkey_cached *ck = (void *) iter->l[0].b;
1375 old = bkey_i_to_s_c(ck->k);
1378 if (old.k->type == new->k.type) {
1379 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1380 fs_usage, trans->journal_res.seq,
1381 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1384 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1385 fs_usage, trans->journal_res.seq,
1386 BTREE_TRIGGER_INSERT|flags);
1387 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1388 fs_usage, trans->journal_res.seq,
1389 BTREE_TRIGGER_OVERWRITE|flags);
1392 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1393 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1395 fs_usage, trans->journal_res.seq,
1396 BTREE_TRIGGER_INSERT|flags);
1398 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
1399 unsigned offset = 0;
1402 old = bkey_disassemble(b, _old, &unpacked);
1403 sectors = -((s64) old.k->size);
1405 flags |= BTREE_TRIGGER_OVERWRITE;
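/*
 * Compute, for each old extent the insert overlaps, the offset into the old
 * extent and the (negative) number of sectors being shadowed; a MIDDLE
 * overlap splits the old extent and is flagged with
 * BTREE_TRIGGER_OVERWRITE_SPLIT.
 */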
1407 if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1410 switch (bch2_extent_overlap(&new->k, old.k)) {
1411 case BCH_EXTENT_OVERLAP_ALL:
1413 sectors = -((s64) old.k->size);
1415 case BCH_EXTENT_OVERLAP_BACK:
1416 offset = bkey_start_offset(&new->k) -
1417 bkey_start_offset(old.k);
1418 sectors = bkey_start_offset(&new->k) -
1421 case BCH_EXTENT_OVERLAP_FRONT:
1423 sectors = bkey_start_offset(old.k) -
1426 case BCH_EXTENT_OVERLAP_MIDDLE:
1427 offset = bkey_start_offset(&new->k) -
1428 bkey_start_offset(old.k);
1429 sectors = -((s64) new->k.size);
1430 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1434 BUG_ON(sectors >= 0);
1436 ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1437 offset, sectors, fs_usage,
1438 trans->journal_res.seq, flags) ?: 1;
1442 bch2_btree_node_iter_advance(&node_iter, b);
1449 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1450 struct bch_fs_usage *fs_usage)
1452 struct bch_fs *c = trans->c;
1453 struct btree_insert_entry *i;
1454 static int warned_disk_usage = 0;
1455 u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1458 if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1459 trans->journal_res.seq) ||
1460 warned_disk_usage ||
1461 xchg(&warned_disk_usage, 1))
1464 bch_err(c, "disk usage increased more than %llu sectors reserved",
1467 trans_for_each_update(trans, i) {
1468 pr_err("while inserting");
1469 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1471 pr_err("overlapping with");
1473 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1474 struct btree *b = iter_l(i->iter)->b;
1475 struct btree_node_iter node_iter = iter_l(i->iter)->iter;
1476 struct bkey_packed *_k;
1478 while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1479 struct bkey unpacked;
1482 pr_info("_k %px format %u", _k, _k->format);
1483 k = bkey_disassemble(b, _k, &unpacked);
1485 if (btree_node_is_extents(b)
1486 ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1487 : bkey_cmp(i->k->k.p, k.k->p))
1490 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1493 bch2_btree_node_iter_advance(&node_iter, b);
1496 struct bkey_cached *ck = (void *) i->iter->l[0].b;
1499 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
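/*
 * Triggers need to see the value a transaction is about to write, not just
 * what's currently in the btree: trans_get_update() checks the transaction's
 * pending updates for a key covering @pos, and trans_get_key() falls back to
 * a btree iterator (cached for the alloc btree) only if no pending update
 * matches.
 */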
1508 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1509 enum btree_id btree_id, struct bpos pos,
1512 struct btree_insert_entry *i;
1514 trans_for_each_update(trans, i)
1515 if (i->iter->btree_id == btree_id &&
1516 (btree_node_type_is_extents(btree_id)
1517 ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1518 bkey_cmp(pos, i->k->k.p) < 0
1519 : !bkey_cmp(pos, i->iter->pos))) {
1520 *k = bkey_i_to_s_c(i->k);
1527 static int trans_get_key(struct btree_trans *trans,
1528 enum btree_id btree_id, struct bpos pos,
1529 struct btree_iter **iter,
1532 unsigned flags = btree_id != BTREE_ID_ALLOC
1534 : BTREE_ITER_CACHED;
1537 *iter = trans_get_update(trans, btree_id, pos, k);
1541 *iter = bch2_trans_get_iter(trans, btree_id, pos,
1542 flags|BTREE_ITER_INTENT);
1544 return PTR_ERR(*iter);
1546 *k = __bch2_btree_iter_peek(*iter, flags);
1549 bch2_trans_iter_put(trans, *iter);
1553 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1554 struct bkey_s_c k, struct extent_ptr_decoded p,
1555 s64 sectors, enum bch_data_type data_type)
1557 struct bch_fs *c = trans->c;
1558 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1559 struct bpos pos = POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr));
1560 struct btree_iter *iter;
1561 struct bkey_s_c k_a;
1562 struct bkey_alloc_unpacked u;
1563 struct bkey_i_alloc *a;
1567 iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k_a);
1569 u = bch2_alloc_unpack(k_a);
1571 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
1573 BTREE_ITER_CACHED_NOFILL|
1576 return PTR_ERR(iter);
1578 ret = bch2_btree_iter_traverse(iter);
1582 percpu_down_read(&c->mark_lock);
1583 g = bucket(ca, pos.offset);
1584 u = alloc_mem_to_key(g, READ_ONCE(g->mark));
1585 percpu_up_read(&c->mark_lock);
1588 ret = __mark_pointer(c, k, p, sectors, data_type, u.gen, &u.data_type,
1589 &u.dirty_sectors, &u.cached_sectors);
1593 a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
1594 ret = PTR_ERR_OR_ZERO(a);
1598 bkey_alloc_init(&a->k_i);
1600 bch2_alloc_pack(a, u);
1601 bch2_trans_update(trans, iter, &a->k_i, 0);
1603 bch2_trans_iter_put(trans, iter);
1607 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1608 struct bch_extent_stripe_ptr p,
1609 s64 sectors, enum bch_data_type data_type,
1610 struct bch_replicas_padded *r,
1612 unsigned *nr_parity)
1614 struct bch_fs *c = trans->c;
1615 struct btree_iter *iter;
1617 struct bkey_i_stripe *s;
1620 ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1624 if (k.k->type != KEY_TYPE_stripe) {
1625 bch2_fs_inconsistent(c,
1626 "pointer to nonexistent stripe %llu",
1632 s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1633 ret = PTR_ERR_OR_ZERO(s);
1637 bkey_reassemble(&s->k_i, k);
1639 stripe_blockcount_set(&s->v, p.block,
1640 stripe_blockcount_get(&s->v, p.block) +
1643 *nr_data = s->v.nr_blocks - s->v.nr_redundant;
1644 *nr_parity = s->v.nr_redundant;
1645 bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
1646 bch2_trans_update(trans, iter, &s->k_i, 0);
1648 bch2_trans_iter_put(trans, iter);
1652 static int bch2_trans_mark_extent(struct btree_trans *trans,
1653 struct bkey_s_c k, unsigned offset,
1654 s64 sectors, unsigned flags,
1655 enum bch_data_type data_type)
1657 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1658 const union bch_extent_entry *entry;
1659 struct extent_ptr_decoded p;
1660 struct bch_replicas_padded r;
1661 s64 dirty_sectors = 0;
1665 r.e.data_type = data_type;
1667 r.e.nr_required = 1;
1671 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1672 s64 disk_sectors = data_type == BCH_DATA_btree
1674 : ptr_disk_sectors_delta(p, offset, sectors, flags);
1676 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1685 update_cached_sectors_list(trans, p.ptr.dev,
1687 } else if (!p.has_ec) {
1688 dirty_sectors += disk_sectors;
1689 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1691 struct bch_replicas_padded ec_r;
1692 unsigned nr_data, nr_parity;
1695 ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
1696 disk_sectors, data_type,
1697 &ec_r, &nr_data, &nr_parity);
1702 __ptr_disk_sectors_delta(p.crc.live_size,
1703 offset, sectors, flags,
1704 p.crc.compressed_size * nr_parity,
1705 p.crc.uncompressed_size * nr_data);
1707 update_replicas_list(trans, &ec_r.e,
1708 disk_sectors + parity_sectors);
1710 r.e.nr_required = 0;
1715 update_replicas_list(trans, &r.e, dirty_sectors);
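/*
 * reflink pointers don't own data themselves; they reference a range of an
 * indirect extent in the reflink btree. Marking one walks the reflink_v
 * keys covering [idx, idx + sectors), adjusting each refcount (incrementing
 * on insert, decrementing on overwrite) and deleting the reflink_v once its
 * refcount reaches zero.
 */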
1720 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1721 struct bkey_s_c_reflink_p p,
1722 u64 idx, unsigned sectors,
1725 struct bch_fs *c = trans->c;
1726 struct btree_iter *iter;
1728 struct bkey_i_reflink_v *r_v;
1731 ret = trans_get_key(trans, BTREE_ID_REFLINK,
1732 POS(0, idx), &iter, &k);
1736 if (k.k->type != KEY_TYPE_reflink_v) {
1737 bch2_fs_inconsistent(c,
1738 "%llu:%llu len %u points to nonexistent indirect extent %llu",
1739 p.k->p.inode, p.k->p.offset, p.k->size, idx);
1744 if ((flags & BTREE_TRIGGER_OVERWRITE) &&
1745 (bkey_start_offset(k.k) < idx ||
1746 k.k->p.offset > idx + sectors))
1749 sectors = k.k->p.offset - idx;
1751 r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1752 ret = PTR_ERR_OR_ZERO(r_v);
1756 bkey_reassemble(&r_v->k_i, k);
1758 le64_add_cpu(&r_v->v.refcount,
1759 !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
1761 if (!r_v->v.refcount) {
1762 r_v->k.type = KEY_TYPE_deleted;
1763 set_bkey_val_u64s(&r_v->k, 0);
1766 bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1767 BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1769 bch2_trans_update(trans, iter, &r_v->k_i, 0);
1773 bch2_trans_iter_put(trans, iter);
1777 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1778 struct bkey_s_c_reflink_p p, unsigned offset,
1779 s64 sectors, unsigned flags)
1781 u64 idx = le64_to_cpu(p.v->idx) + offset;
1784 sectors = abs(sectors);
1785 BUG_ON(offset + sectors > p.k->size);
1788 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1793 sectors = max_t(s64, 0LL, sectors - ret);
1800 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
1801 unsigned offset, s64 sectors, unsigned flags)
1803 struct replicas_delta_list *d;
1804 struct bch_fs *c = trans->c;
1806 switch (k.k->type) {
1807 case KEY_TYPE_btree_ptr:
1808 case KEY_TYPE_btree_ptr_v2:
1809 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1810 ? c->opts.btree_node_size
1811 : -c->opts.btree_node_size;
1813 return bch2_trans_mark_extent(trans, k, offset, sectors,
1814 flags, BCH_DATA_btree);
1815 case KEY_TYPE_extent:
1816 case KEY_TYPE_reflink_v:
1817 return bch2_trans_mark_extent(trans, k, offset, sectors,
1818 flags, BCH_DATA_user);
1819 case KEY_TYPE_inode:
1820 d = replicas_deltas_realloc(trans, 0);
1822 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1827 case KEY_TYPE_reservation: {
1828 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1830 d = replicas_deltas_realloc(trans, 0);
1832 sectors *= replicas;
1833 replicas = clamp_t(unsigned, replicas, 1,
1834 ARRAY_SIZE(d->persistent_reserved));
1836 d->persistent_reserved[replicas - 1] += sectors;
1839 case KEY_TYPE_reflink_p:
1840 return bch2_trans_mark_reflink_p(trans,
1841 bkey_s_c_to_reflink_p(k),
1842 offset, sectors, flags);
1848 int bch2_trans_mark_update(struct btree_trans *trans,
1849 struct btree_iter *iter,
1850 struct bkey_i *insert,
1853 struct btree *b = iter_l(iter)->b;
1854 struct btree_node_iter node_iter = iter_l(iter)->iter;
1855 struct bkey_packed *_k;
1858 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1861 if (!btree_node_type_needs_gc(iter->btree_id))
1864 ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
1865 0, insert->k.size, BTREE_TRIGGER_INSERT);
1869 if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
1870 struct bkey_cached *ck = (void *) iter->l[0].b;
1872 return bch2_trans_mark_key(trans, bkey_i_to_s_c(ck->k),
1873 0, 0, BTREE_TRIGGER_OVERWRITE);
1876 while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1877 struct bkey unpacked;
1879 unsigned offset = 0;
1881 unsigned flags = BTREE_TRIGGER_OVERWRITE;
1883 k = bkey_disassemble(b, _k, &unpacked);
1885 if (btree_node_is_extents(b)
1886 ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
1887 : bkey_cmp(insert->k.p, k.k->p))
1890 if (btree_node_is_extents(b)) {
1891 switch (bch2_extent_overlap(&insert->k, k.k)) {
1892 case BCH_EXTENT_OVERLAP_ALL:
1894 sectors = -((s64) k.k->size);
1896 case BCH_EXTENT_OVERLAP_BACK:
1897 offset = bkey_start_offset(&insert->k) -
1898 bkey_start_offset(k.k);
1899 sectors = bkey_start_offset(&insert->k) -
1902 case BCH_EXTENT_OVERLAP_FRONT:
1904 sectors = bkey_start_offset(k.k) -
1907 case BCH_EXTENT_OVERLAP_MIDDLE:
1908 offset = bkey_start_offset(&insert->k) -
1909 bkey_start_offset(k.k);
1910 sectors = -((s64) insert->k.size);
1911 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1915 BUG_ON(sectors >= 0);
1918 ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
1922 bch2_btree_node_iter_advance(&node_iter, b);
1928 /* Disk reservations: */
1930 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
1932 percpu_u64_set(&c->pcpu->sectors_available, 0);
1934 return avail_factor(__bch2_fs_usage_read_short(c).free);
1937 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
1939 percpu_down_read(&c->mark_lock);
1940 this_cpu_sub(c->usage[0]->online_reserved,
1942 percpu_up_read(&c->mark_lock);
1947 #define SECTORS_CACHE 1024
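/*
 * To avoid hammering the global sectors_available atomic on every
 * reservation, each CPU keeps a small cache: when it runs dry we grab the
 * requested sectors plus SECTORS_CACHE more from the global counter, and
 * only fall back to the slow path (recalculating free space under
 * mark_lock) when the global counter can't satisfy the request.
 */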
1949 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1950 unsigned sectors, int flags)
1952 struct bch_fs_pcpu *pcpu;
1954 s64 sectors_available;
1957 percpu_down_read(&c->mark_lock);
1959 pcpu = this_cpu_ptr(c->pcpu);
1961 if (sectors <= pcpu->sectors_available)
1964 v = atomic64_read(&c->sectors_available);
1967 get = min((u64) sectors + SECTORS_CACHE, old);
1969 if (get < sectors) {
1971 percpu_up_read(&c->mark_lock);
1974 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1975 old, old - get)) != old);
1977 pcpu->sectors_available += get;
1980 pcpu->sectors_available -= sectors;
1981 this_cpu_add(c->usage[0]->online_reserved, sectors);
1982 res->sectors += sectors;
1985 percpu_up_read(&c->mark_lock);
1989 percpu_down_write(&c->mark_lock);
1991 sectors_available = bch2_recalc_sectors_available(c);
1993 if (sectors <= sectors_available ||
1994 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1995 atomic64_set(&c->sectors_available,
1996 max_t(s64, 0, sectors_available - sectors));
1997 this_cpu_add(c->usage[0]->online_reserved, sectors);
1998 res->sectors += sectors;
2001 atomic64_set(&c->sectors_available, sectors_available);
2005 percpu_up_write(&c->mark_lock);
2010 /* Startup/shutdown: */
2012 static void buckets_free_rcu(struct rcu_head *rcu)
2014 struct bucket_array *buckets =
2015 container_of(rcu, struct bucket_array, rcu);
2018 sizeof(struct bucket_array) +
2019 buckets->nbuckets * sizeof(struct bucket));
2022 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2024 struct bucket_array *buckets = NULL, *old_buckets = NULL;
2025 unsigned long *buckets_nouse = NULL;
2026 alloc_fifo free[RESERVE_NR];
2027 alloc_fifo free_inc;
2028 alloc_heap alloc_heap;
2030 size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2031 ca->mi.bucket_size / c->opts.btree_node_size);
2032 /* XXX: these should be tunable */
2033 size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
2034 size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 7);
2035 size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
2037 bool resize = ca->buckets[0] != NULL;
2041 memset(&free, 0, sizeof(free));
2042 memset(&free_inc, 0, sizeof(free_inc));
2043 memset(&alloc_heap, 0, sizeof(alloc_heap));
2045 if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
2046 nbuckets * sizeof(struct bucket),
2047 GFP_KERNEL|__GFP_ZERO)) ||
2048 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2049 sizeof(unsigned long),
2050 GFP_KERNEL|__GFP_ZERO)) ||
2051 !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
2052 !init_fifo(&free[RESERVE_MOVINGGC],
2053 copygc_reserve, GFP_KERNEL) ||
2054 !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2055 !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
2056 !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2059 buckets->first_bucket = ca->mi.first_bucket;
2060 buckets->nbuckets = nbuckets;
2062 bch2_copygc_stop(c);
2065 down_write(&c->gc_lock);
2066 down_write(&ca->bucket_lock);
2067 percpu_down_write(&c->mark_lock);
2070 old_buckets = bucket_array(ca);
2073 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2077 n * sizeof(struct bucket));
2078 memcpy(buckets_nouse,
2080 BITS_TO_LONGS(n) * sizeof(unsigned long));
2083 rcu_assign_pointer(ca->buckets[0], buckets);
2084 buckets = old_buckets;
2086 swap(ca->buckets_nouse, buckets_nouse);
2089 percpu_up_write(&c->mark_lock);
2090 up_write(&c->gc_lock);
2093 spin_lock(&c->freelist_lock);
2094 for (i = 0; i < RESERVE_NR; i++) {
2095 fifo_move(&free[i], &ca->free[i]);
2096 swap(ca->free[i], free[i]);
2098 fifo_move(&free_inc, &ca->free_inc);
2099 swap(ca->free_inc, free_inc);
2100 spin_unlock(&c->freelist_lock);
2102 /* with gc lock held, alloc_heap can't be in use: */
2103 swap(ca->alloc_heap, alloc_heap);
2105 nbuckets = ca->mi.nbuckets;
2108 up_write(&ca->bucket_lock);
2112 free_heap(&alloc_heap);
2113 free_fifo(&free_inc);
2114 for (i = 0; i < RESERVE_NR; i++)
2115 free_fifo(&free[i]);
2116 kvpfree(buckets_nouse,
2117 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2119 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2124 void bch2_dev_buckets_free(struct bch_dev *ca)
2128 free_heap(&ca->alloc_heap);
2129 free_fifo(&ca->free_inc);
2130 for (i = 0; i < RESERVE_NR; i++)
2131 free_fifo(&ca->free[i]);
2132 kvpfree(ca->buckets_nouse,
2133 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2134 kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2135 sizeof(struct bucket_array) +
2136 ca->mi.nbuckets * sizeof(struct bucket));
2138 free_percpu(ca->usage[0]);
2141 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2143 if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2146 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);