1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
9 #include "alloc_background.h"
12 #include "btree_update.h"
20 #include <linux/preempt.h>
21 #include <trace/events/bcachefs.h>
23 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
24 enum bch_data_type data_type,
29 fs_usage->btree += sectors;
33 fs_usage->data += sectors;
36 fs_usage->cached += sectors;
44 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
47 void bch2_bucket_seq_cleanup(struct bch_fs *c)
49 u64 journal_seq = atomic64_read(&c->journal.seq);
50 u16 last_seq_ondisk = c->journal.last_seq_ondisk;
52 struct bucket_array *buckets;
57 if (journal_seq - c->last_bucket_seq_cleanup <
58 (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
61 c->last_bucket_seq_cleanup = journal_seq;
63 for_each_member_device(ca, c, i) {
64 down_read(&ca->bucket_lock);
65 buckets = bucket_array(ca);
67 for_each_bucket(g, buckets) {
68 bucket_cmpxchg(g, m, ({
69 if (!m.journal_seq_valid ||
70 bucket_needs_journal_commit(m, last_seq_ondisk))
73 m.journal_seq_valid = 0;
76 up_read(&ca->bucket_lock);
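/*
 * Recompute the derived fields in c->usage_base at startup: fold the percpu
 * accumulators into the base counters, then rebuild reserved, the per-data-type
 * totals and hidden (superblock + journal buckets) from them.
 */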
80 void bch2_fs_usage_initialize(struct bch_fs *c)
82 struct bch_fs_usage *usage;
86 percpu_down_write(&c->mark_lock);
87 usage = c->usage_base;
89 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
90 bch2_fs_usage_acc_to_base(c, i);
92 for (i = 0; i < BCH_REPLICAS_MAX; i++)
93 usage->reserved += usage->persistent_reserved[i];
95 for (i = 0; i < c->replicas.nr; i++) {
96 struct bch_replicas_entry *e =
97 cpu_replicas_entry(&c->replicas, i);
99 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
102 for_each_member_device(ca, c, i) {
103 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
105 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
106 dev.d[BCH_DATA_journal].buckets) *
110 percpu_up_write(&c->mark_lock);
113 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
114 unsigned journal_seq,
117 return this_cpu_ptr(gc
118 ? ca->usage_gc
119 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
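/*
 * Read a consistent snapshot of per-device usage: copy ca->usage_base and sum
 * in each percpu journal-buffer accumulator, retrying if the usage_lock
 * seqcount changed underneath us.
 */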
122 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
124 struct bch_fs *c = ca->fs;
125 struct bch_dev_usage ret;
126 unsigned seq, i, u64s = dev_usage_u64s();
129 seq = read_seqcount_begin(&c->usage_lock);
130 memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
131 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
132 acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
133 } while (read_seqcount_retry(&c->usage_lock, seq));
138 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
139 unsigned journal_seq,
142 return this_cpu_ptr(gc
143 ? c->usage_gc
144 : c->usage[journal_seq & JOURNAL_BUF_MASK]);
147 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
149 ssize_t offset = v - (u64 *) c->usage_base;
153 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
154 percpu_rwsem_assert_held(&c->mark_lock);
157 seq = read_seqcount_begin(&c->usage_lock);
160 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
161 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
162 } while (read_seqcount_retry(&c->usage_lock, seq));
167 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
169 struct bch_fs_usage_online *ret;
170 unsigned seq, i, u64s;
172 percpu_down_read(&c->mark_lock);
174 ret = kmalloc(sizeof(struct bch_fs_usage_online) +
175 sizeof(u64) * c->replicas.nr, GFP_NOFS);
176 if (unlikely(!ret)) {
177 percpu_up_read(&c->mark_lock);
181 ret->online_reserved = percpu_u64_get(c->online_reserved);
183 u64s = fs_usage_u64s(c);
185 seq = read_seqcount_begin(&c->usage_lock);
186 memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
187 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
188 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
189 } while (read_seqcount_retry(&c->usage_lock, seq));
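/*
 * Fold the percpu usage accumulator for journal buffer @idx into the base
 * counters (filesystem-wide and per-device) and zero it, under the usage_lock
 * write seqcount so concurrent readers retry.
 */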
194 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
197 unsigned i, u64s = fs_usage_u64s(c);
199 BUG_ON(idx >= ARRAY_SIZE(c->usage));
202 write_seqcount_begin(&c->usage_lock);
204 acc_u64s_percpu((u64 *) c->usage_base,
205 (u64 __percpu *) c->usage[idx], u64s);
206 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
209 for_each_member_device_rcu(ca, c, i, NULL) {
210 u64s = dev_usage_u64s();
212 acc_u64s_percpu((u64 *) ca->usage_base,
213 (u64 __percpu *) ca->usage[idx], u64s);
214 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
218 write_seqcount_end(&c->usage_lock);
222 void bch2_fs_usage_to_text(struct printbuf *out,
224 struct bch_fs_usage_online *fs_usage)
228 pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
230 pr_buf(out, "hidden:\t\t\t\t%llu\n",
232 pr_buf(out, "data:\t\t\t\t%llu\n",
234 pr_buf(out, "cached:\t\t\t\t%llu\n",
236 pr_buf(out, "reserved:\t\t\t%llu\n",
237 fs_usage->u.reserved);
238 pr_buf(out, "nr_inodes:\t\t\t%llu\n",
239 fs_usage->u.nr_inodes);
240 pr_buf(out, "online reserved:\t\t%llu\n",
241 fs_usage->online_reserved);
244 i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
246 pr_buf(out, "%u replicas:\n", i + 1);
247 pr_buf(out, "\treserved:\t\t%llu\n",
248 fs_usage->u.persistent_reserved[i]);
251 for (i = 0; i < c->replicas.nr; i++) {
252 struct bch_replicas_entry *e =
253 cpu_replicas_entry(&c->replicas, i);
256 bch2_replicas_entry_to_text(out, e);
257 pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
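/*
 * reserve_factor() pads a reservation by roughly 1/2^RESERVE_FACTOR to leave
 * slack for internal overhead. Worked example (assuming RESERVE_FACTOR = 6):
 * reserve_factor(1000) = 1000 + round_up(1000, 64) / 64 = 1000 + 16 = 1016.
 */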
261 static u64 reserve_factor(u64 r)
263 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
266 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
268 return min(fs_usage->u.hidden +
271 reserve_factor(fs_usage->u.reserved +
272 fs_usage->online_reserved),
276 static struct bch_fs_usage_short
277 __bch2_fs_usage_read_short(struct bch_fs *c)
279 struct bch_fs_usage_short ret;
282 ret.capacity = c->capacity -
283 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
285 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
286 bch2_fs_usage_read_one(c, &c->usage_base->btree);
287 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
288 percpu_u64_get(c->online_reserved);
290 ret.used = min(ret.capacity, data + reserve_factor(reserved));
291 ret.free = ret.capacity - ret.used;
293 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
298 struct bch_fs_usage_short
299 bch2_fs_usage_read_short(struct bch_fs *c)
301 struct bch_fs_usage_short ret;
303 percpu_down_read(&c->mark_lock);
304 ret = __bch2_fs_usage_read_short(c);
305 percpu_up_read(&c->mark_lock);
310 static inline int is_unavailable_bucket(struct bucket_mark m)
312 return !is_available_bucket(m);
315 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
316 struct bucket_mark m)
318 return bucket_sectors_used(m)
319 ? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
323 static inline int is_stripe_data_bucket(struct bucket_mark m)
325 return m.stripe && m.data_type != BCH_DATA_parity;
328 static inline enum bch_data_type bucket_type(struct bucket_mark m)
330 return m.cached_sectors && !m.dirty_sectors
? BCH_DATA_cached
: m.data_type;
335 static bool bucket_became_unavailable(struct bucket_mark old,
336 struct bucket_mark new)
338 return is_available_bucket(old) &&
339 !is_available_bucket(new);
342 static inline void account_bucket(struct bch_fs_usage *fs_usage,
343 struct bch_dev_usage *dev_usage,
344 enum bch_data_type type,
347 if (type == BCH_DATA_sb || type == BCH_DATA_journal)
348 fs_usage->hidden += size;
350 dev_usage->d[type].buckets += nr;
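/*
 * Apply the delta between an old and new bucket_mark to the owning device's
 * usage: bucket counts per data type, dirty/cached sectors, fragmented sectors
 * and the unavailable-bucket count; wake the allocator if the bucket just
 * became available.
 */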
353 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
354 struct bch_fs_usage *fs_usage,
355 struct bucket_mark old, struct bucket_mark new,
356 u64 journal_seq, bool gc)
358 struct bch_dev_usage *u;
360 percpu_rwsem_assert_held(&c->mark_lock);
364 fs_usage = fs_usage_ptr(c, journal_seq, gc);
365 u = dev_usage_ptr(ca, journal_seq, gc);
367 if (bucket_type(old))
368 account_bucket(fs_usage, u, bucket_type(old),
369 -1, -ca->mi.bucket_size);
371 if (bucket_type(new))
372 account_bucket(fs_usage, u, bucket_type(new),
373 1, ca->mi.bucket_size);
375 u->buckets_ec += (int) new.stripe - (int) old.stripe;
376 u->buckets_unavailable +=
377 is_unavailable_bucket(new) - is_unavailable_bucket(old);
379 u->d[old.data_type].sectors -= old.dirty_sectors;
380 u->d[new.data_type].sectors += new.dirty_sectors;
381 u->d[BCH_DATA_cached].sectors +=
382 (int) new.cached_sectors - (int) old.cached_sectors;
384 u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
385 u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
389 if (!is_available_bucket(old) && is_available_bucket(new))
390 bch2_wake_allocator(ca);
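/*
 * Accumulate @sectors against a replicas entry: bump the base per-data-type
 * counters and the per-entry counter at the entry's index in the replicas
 * table (looked up via bch2_replicas_entry_idx()).
 */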
393 static inline int update_replicas(struct bch_fs *c,
394 struct bch_fs_usage *fs_usage,
395 struct bch_replicas_entry *r,
398 int idx = bch2_replicas_entry_idx(c, r);
403 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
404 fs_usage->replicas[idx] += sectors;
408 static inline int update_cached_sectors(struct bch_fs *c,
409 struct bch_fs_usage *fs_usage,
410 unsigned dev, s64 sectors)
412 struct bch_replicas_padded r;
414 bch2_replicas_entry_cached(&r.e, dev);
416 return update_replicas(c, fs_usage, &r.e, sectors);
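/*
 * Ensure the transaction's replicas_delta_list has room for @more bytes,
 * growing it with krealloc() and falling back to a fixed-size mempool
 * allocation of REPLICAS_DELTA_LIST_MAX bytes if that fails.
 */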
419 static struct replicas_delta_list *
420 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
422 struct replicas_delta_list *d = trans->fs_usage_deltas;
423 unsigned new_size = d ? (d->size + more) * 2 : 128;
424 unsigned alloc_size = sizeof(*d) + new_size;
426 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
428 if (!d || d->used + more > d->size) {
429 d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
431 BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
434 d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
435 memset(d, 0, REPLICAS_DELTA_LIST_MAX);
437 if (trans->fs_usage_deltas)
438 memcpy(d, trans->fs_usage_deltas,
439 trans->fs_usage_deltas->size + sizeof(*d));
441 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
442 kfree(trans->fs_usage_deltas);
446 trans->fs_usage_deltas = d;
451 static inline void update_replicas_list(struct btree_trans *trans,
452 struct bch_replicas_entry *r,
455 struct replicas_delta_list *d;
456 struct replicas_delta *n;
462 b = replicas_entry_bytes(r) + 8;
463 d = replicas_deltas_realloc(trans, b);
465 n = (void *) d->d + d->used;
467 memcpy(&n->r, r, replicas_entry_bytes(r));
468 bch2_replicas_entry_sort(&n->r);
472 static inline void update_cached_sectors_list(struct btree_trans *trans,
473 unsigned dev, s64 sectors)
475 struct bch_replicas_padded r;
477 bch2_replicas_entry_cached(&r.e, dev);
479 update_replicas_list(trans, &r.e, sectors);
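/*
 * do_mark_fn() runs a mark function against the normal counters, the GC
 * counters, or both, depending on BTREE_TRIGGER_GC and on whether GC has
 * already walked past @pos (gc_visited()).
 */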
482 #define do_mark_fn(fn, c, pos, flags, ...) \
486 percpu_rwsem_assert_held(&c->mark_lock); \
488 for (gc = 0; gc < 2 && !ret; gc++) \
489 if (!gc == !(flags & BTREE_TRIGGER_GC) || \
490 (gc && gc_visited(c, pos))) \
491 ret = fn(c, __VA_ARGS__, gc); \
495 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
496 size_t b, bool owned_by_allocator)
498 struct bucket *g = bucket(ca, b);
499 struct bucket_mark old, new;
501 old = bucket_cmpxchg(g, new, ({
502 new.owned_by_allocator = owned_by_allocator;
505 BUG_ON(owned_by_allocator == old.owned_by_allocator);
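/*
 * Trigger for alloc keys: copy the unpacked alloc key fields into the
 * in-memory bucket mark and bucket, update device usage, and on bucket
 * invalidation drop the cached-sector accounting for the bucket.
 */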
508 static int bch2_mark_alloc(struct bch_fs *c,
509 struct bkey_s_c old, struct bkey_s_c new,
510 struct bch_fs_usage *fs_usage,
511 u64 journal_seq, unsigned flags)
513 bool gc = flags & BTREE_TRIGGER_GC;
514 struct bkey_alloc_unpacked u;
517 struct bucket_mark old_m, m;
519 /* We don't do anything for deletions - do we?: */
520 if (new.k->type != KEY_TYPE_alloc &&
521 new.k->type != KEY_TYPE_alloc_v2)
525 * alloc btree is read in by bch2_alloc_read, not gc:
527 if ((flags & BTREE_TRIGGER_GC) &&
528 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
531 ca = bch_dev_bkey_exists(c, new.k->p.inode);
533 if (new.k->p.offset >= ca->mi.nbuckets)
536 g = __bucket(ca, new.k->p.offset, gc);
537 u = bch2_alloc_unpack(new);
539 old_m = bucket_cmpxchg(g, m, ({
541 m.data_type = u.data_type;
542 m.dirty_sectors = u.dirty_sectors;
543 m.cached_sectors = u.cached_sectors;
544 m.stripe = u.stripe != 0;
547 m.journal_seq_valid = 1;
548 m.journal_seq = journal_seq;
552 bch2_dev_usage_update(c, ca, fs_usage, old_m, m, journal_seq, gc);
554 g->io_time[READ] = u.read_time;
555 g->io_time[WRITE] = u.write_time;
556 g->oldest_gen = u.oldest_gen;
558 g->stripe = u.stripe;
559 g->stripe_redundancy = u.stripe_redundancy;
562 * need to know if we're getting called from the invalidate path or
566 if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
567 old_m.cached_sectors) {
568 if (update_cached_sectors(c, fs_usage, ca->dev_idx,
569 -old_m.cached_sectors)) {
570 bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
574 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
575 old_m.cached_sectors);
581 #define checked_add(a, b) \
583 unsigned _res = (unsigned) (a) + (b); \
584 bool overflow = _res > U16_MAX; \
591 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
592 size_t b, enum bch_data_type data_type,
593 unsigned sectors, bool gc)
595 struct bucket *g = __bucket(ca, b, gc);
596 struct bucket_mark old, new;
599 BUG_ON(data_type != BCH_DATA_sb &&
600 data_type != BCH_DATA_journal);
602 old = bucket_cmpxchg(g, new, ({
603 new.data_type = data_type;
604 overflow = checked_add(new.dirty_sectors, sectors);
607 bch2_fs_inconsistent_on(old.data_type &&
608 old.data_type != data_type, c,
609 "different types of data in same bucket: %s, %s",
610 bch2_data_types[old.data_type],
611 bch2_data_types[data_type]);
613 bch2_fs_inconsistent_on(overflow, c,
614 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
615 ca->dev_idx, b, new.gen,
616 bch2_data_types[old.data_type ?: data_type],
617 old.dirty_sectors, sectors);
620 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
626 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
627 size_t b, enum bch_data_type type,
628 unsigned sectors, struct gc_pos pos,
631 BUG_ON(type != BCH_DATA_sb &&
632 type != BCH_DATA_journal);
635 * Backup superblock might be past the end of our normal usable space:
637 if (b >= ca->mi.nbuckets)
643 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
644 ca, b, type, sectors);
646 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
652 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
654 return DIV_ROUND_UP(sectors * n, d);
657 static s64 __ptr_disk_sectors_delta(unsigned old_size,
658 unsigned offset, s64 delta,
660 unsigned n, unsigned d)
664 if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
665 BUG_ON(offset + -delta > old_size);
667 return -disk_sectors_scaled(n, d, old_size) +
668 disk_sectors_scaled(n, d, offset) +
669 disk_sectors_scaled(n, d, old_size - offset + delta);
670 } else if (flags & BTREE_TRIGGER_OVERWRITE) {
671 BUG_ON(offset + -delta > old_size);
673 return -disk_sectors_scaled(n, d, old_size) +
674 disk_sectors_scaled(n, d, old_size + delta);
676 return disk_sectors_scaled(n, d, delta);
680 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
681 unsigned offset, s64 delta,
684 return __ptr_disk_sectors_delta(p.crc.live_size,
685 offset, delta, flags,
686 p.crc.compressed_size,
687 p.crc.uncompressed_size);
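/*
 * Sanity check a pointer against the bucket it points into: pointer gen newer
 * than bucket gen, pointer too stale, stale dirty pointer, conflicting data
 * types, or bucket sector count overflow all produce fsck errors.
 */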
690 static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
691 const struct bch_extent_ptr *ptr,
692 s64 sectors, enum bch_data_type ptr_data_type,
693 u8 bucket_gen, u8 bucket_data_type,
694 u16 dirty_sectors, u16 cached_sectors)
696 size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
697 u16 bucket_sectors = !ptr->cached
? dirty_sectors
: cached_sectors;
702 if (gen_after(ptr->gen, bucket_gen)) {
703 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
704 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
706 ptr->dev, bucket_nr, bucket_gen,
707 bch2_data_types[bucket_data_type ?: ptr_data_type],
709 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
713 if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
714 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
715 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
717 ptr->dev, bucket_nr, bucket_gen,
718 bch2_data_types[bucket_data_type ?: ptr_data_type],
720 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
724 if (bucket_gen != ptr->gen && !ptr->cached) {
725 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
726 "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
728 ptr->dev, bucket_nr, bucket_gen,
729 bch2_data_types[bucket_data_type ?: ptr_data_type],
731 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
735 if (bucket_gen != ptr->gen)
738 if (bucket_data_type && ptr_data_type &&
739 bucket_data_type != ptr_data_type) {
740 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
741 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
743 ptr->dev, bucket_nr, bucket_gen,
744 bch2_data_types[bucket_data_type],
745 bch2_data_types[ptr_data_type],
746 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
750 if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
751 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
752 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
754 ptr->dev, bucket_nr, bucket_gen,
755 bch2_data_types[bucket_data_type ?: ptr_data_type],
756 bucket_sectors, sectors,
757 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
764 static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
766 struct bch_fs_usage *fs_usage,
767 u64 journal_seq, unsigned flags)
769 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
770 unsigned nr_data = s->nr_blocks - s->nr_redundant;
771 bool parity = ptr_idx >= nr_data;
772 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
773 bool gc = flags & BTREE_TRIGGER_GC;
774 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
775 struct bucket *g = PTR_BUCKET(ca, ptr, gc);
776 struct bucket_mark new, old;
780 if (g->stripe && g->stripe != k.k->p.offset) {
781 bch2_fs_inconsistent(c,
782 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
783 ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
784 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
788 old = bucket_cmpxchg(g, new, ({
789 ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
790 new.dirty_sectors, new.cached_sectors);
795 new.data_type = BCH_DATA_parity;
796 new.dirty_sectors = le16_to_cpu(s->sectors);
800 new.journal_seq_valid = 1;
801 new.journal_seq = journal_seq;
805 g->stripe = k.k->p.offset;
806 g->stripe_redundancy = s->nr_redundant;
808 bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
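/*
 * Apply a sector delta from a single pointer to a bucket's dirty or cached
 * sector count (after check_bucket_ref() passes) and recompute the bucket's
 * data type.
 */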
812 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
813 const struct bch_extent_ptr *ptr,
814 s64 sectors, enum bch_data_type ptr_data_type,
815 u8 bucket_gen, u8 *bucket_data_type,
816 u16 *dirty_sectors, u16 *cached_sectors)
818 u16 *dst_sectors = !ptr->cached
819 ? dirty_sectors
820 : cached_sectors;
821 int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
822 bucket_gen, *bucket_data_type,
823 *dirty_sectors, *cached_sectors);
828 *dst_sectors += sectors;
829 *bucket_data_type = *dirty_sectors || *cached_sectors
834 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
835 struct extent_ptr_decoded p,
836 s64 sectors, enum bch_data_type data_type,
837 struct bch_fs_usage *fs_usage,
838 u64 journal_seq, unsigned flags)
840 bool gc = flags & BTREE_TRIGGER_GC;
841 struct bucket_mark old, new;
842 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
843 struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
848 v = atomic64_read(&g->_mark.v);
850 new.v.counter = old.v.counter = v;
851 bucket_data_type = new.data_type;
853 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
856 &new.cached_sectors);
860 new.data_type = bucket_data_type;
863 new.journal_seq_valid = 1;
864 new.journal_seq = journal_seq;
867 if (flags & BTREE_TRIGGER_NOATOMIC) {
871 } while ((v = atomic64_cmpxchg(&g->_mark.v,
873 new.v.counter)) != old.v.counter);
875 bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
877 BUG_ON(!gc && bucket_became_unavailable(old, new));
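/*
 * Account sectors referenced through an erasure-coded pointer: update the
 * in-memory stripe's per-block sector counts and blocks_nonempty, reposition
 * it in the stripes heap if needed, and charge a replicas entry for the
 * stripe.
 */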
882 static int bch2_mark_stripe_ptr(struct bch_fs *c,
883 struct bch_extent_stripe_ptr p,
884 enum bch_data_type data_type,
885 struct bch_fs_usage *fs_usage,
886 s64 sectors, unsigned flags)
888 bool gc = flags & BTREE_TRIGGER_GC;
889 struct bch_replicas_padded r;
891 unsigned i, blocks_nonempty = 0;
893 m = genradix_ptr(&c->stripes[gc], p.idx);
895 spin_lock(&c->ec_stripes_heap_lock);
897 if (!m || !m->alive) {
898 spin_unlock(&c->ec_stripes_heap_lock);
899 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
901 bch2_inconsistent_error(c);
905 m->block_sectors[p.block] += sectors;
909 for (i = 0; i < m->nr_blocks; i++)
910 blocks_nonempty += m->block_sectors[i] != 0;
912 if (m->blocks_nonempty != blocks_nonempty) {
913 m->blocks_nonempty = blocks_nonempty;
915 bch2_stripes_heap_update(c, m, p.idx);
918 spin_unlock(&c->ec_stripes_heap_lock);
920 r.e.data_type = data_type;
921 update_replicas(c, fs_usage, &r.e, sectors);
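/*
 * Mark every pointer in an extent: cached pointers are accounted as cached
 * sectors, plain dirty pointers accumulate into one replicas entry, and
 * erasure-coded pointers are charged to their stripe instead.
 */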
926 static int bch2_mark_extent(struct bch_fs *c,
927 struct bkey_s_c old, struct bkey_s_c new,
928 unsigned offset, s64 sectors,
929 enum bch_data_type data_type,
930 struct bch_fs_usage *fs_usage,
931 unsigned journal_seq, unsigned flags)
933 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
934 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
935 const union bch_extent_entry *entry;
936 struct extent_ptr_decoded p;
937 struct bch_replicas_padded r;
938 s64 dirty_sectors = 0;
942 r.e.data_type = data_type;
948 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
949 s64 disk_sectors = data_type == BCH_DATA_btree
951 : ptr_disk_sectors_delta(p, offset, sectors, flags);
953 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
954 fs_usage, journal_seq, flags);
962 if (update_cached_sectors(c, fs_usage, p.ptr.dev,
964 bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
968 } else if (!p.has_ec) {
969 dirty_sectors += disk_sectors;
970 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
972 ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
973 fs_usage, disk_sectors, flags);
978 * There may be other dirty pointers in this extent, but
979 * if so they're not required for mounting if we have an
980 * erasure coded pointer in this extent:
987 if (update_replicas(c, fs_usage, &r.e, dirty_sectors)) {
990 bch2_bkey_val_to_text(&PBUF(buf), c, k);
991 bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
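/*
 * Keep the in-memory struct stripe in sync with a stripe key: on deletion
 * remove it from the stripes heap, otherwise copy sizes, pointers and block
 * sector counts from the key; during GC also re-mark the stripe's buckets and
 * charge its replicas entry.
 */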
999 static int bch2_mark_stripe(struct bch_fs *c,
1000 struct bkey_s_c old, struct bkey_s_c new,
1001 struct bch_fs_usage *fs_usage,
1002 u64 journal_seq, unsigned flags)
1004 bool gc = flags & BTREE_TRIGGER_GC;
1005 size_t idx = new.k->p.offset;
1006 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1007 ? bkey_s_c_to_stripe(old).v : NULL;
1008 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1009 ? bkey_s_c_to_stripe(new).v : NULL;
1010 struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1014 BUG_ON(gc && old_s);
1016 if (!m || (old_s && !m->alive)) {
1017 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1019 bch2_inconsistent_error(c);
1024 spin_lock(&c->ec_stripes_heap_lock);
1025 bch2_stripes_heap_del(c, m, idx);
1026 spin_unlock(&c->ec_stripes_heap_lock);
1028 memset(m, 0, sizeof(*m));
1031 m->sectors = le16_to_cpu(new_s->sectors);
1032 m->algorithm = new_s->algorithm;
1033 m->nr_blocks = new_s->nr_blocks;
1034 m->nr_redundant = new_s->nr_redundant;
1035 m->blocks_nonempty = 0;
1037 for (i = 0; i < new_s->nr_blocks; i++) {
1038 m->block_sectors[i] =
1039 stripe_blockcount_get(new_s, i);
1040 m->blocks_nonempty += !!m->block_sectors[i];
1042 m->ptrs[i] = new_s->ptrs[i];
1045 bch2_bkey_to_replicas(&m->r.e, new);
1048 spin_lock(&c->ec_stripes_heap_lock);
1049 bch2_stripes_heap_update(c, m, idx);
1050 spin_unlock(&c->ec_stripes_heap_lock);
1056 * gc recalculates this field from stripe ptr
1059 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1060 m->blocks_nonempty = 0;
1062 for (i = 0; i < new_s->nr_blocks; i++) {
1063 ret = mark_stripe_bucket(c, new, i, fs_usage,
1064 journal_seq, flags);
1069 if (update_replicas(c, fs_usage, &m->r.e,
1070 ((s64) m->sectors * m->nr_redundant))) {
1073 bch2_bkey_val_to_text(&PBUF(buf), c, new);
1074 bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
1082 static int __reflink_p_frag_references(struct bkey_s_c_reflink_p p,
1083 u64 p_start, u64 p_end,
1084 u64 v_start, u64 v_end)
1086 if (p_start == p_end)
1089 p_start += le64_to_cpu(p.v->idx);
1090 p_end += le64_to_cpu(p.v->idx);
1092 if (p_end <= v_start)
1094 if (p_start >= v_end)
1099 static int reflink_p_frag_references(struct bkey_s_c_reflink_p p,
1103 return __reflink_p_frag_references(p, start, end,
1104 bkey_start_offset(k.k),
1108 static int __bch2_mark_reflink_p(struct bch_fs *c,
1109 struct bkey_s_c_reflink_p p,
1110 u64 idx, unsigned sectors,
1111 unsigned front_frag,
1116 struct reflink_gc *r;
1117 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1118 int frags_referenced;
1121 if (*r_idx >= c->reflink_gc_nr)
1123 r = genradix_ptr(&c->reflink_gc_table, *r_idx);
1126 if (r->offset > idx)
1132 __reflink_p_frag_references(p, 0, front_frag,
1133 r->offset - r->size, r->offset) +
1134 __reflink_p_frag_references(p, back_frag, p.k->size,
1135 r->offset - r->size, r->offset);
1137 if (frags_referenced == 2) {
1138 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE_SPLIT));
1140 } else if (frags_referenced == 1) {
1141 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE));
1145 BUG_ON((s64) r->refcount + add < 0);
1148 return min_t(u64, sectors, r->offset - idx);
1150 bch2_fs_inconsistent(c,
1151 "%llu:%llu len %u points to nonexistent indirect extent %llu",
1152 p.k->p.inode, p.k->p.offset, p.k->size, idx);
1153 bch2_inconsistent_error(c);
1157 static int bch2_mark_reflink_p(struct bch_fs *c,
1158 struct bkey_s_c_reflink_p p, unsigned offset,
1159 s64 sectors, unsigned flags)
1161 u64 idx = le64_to_cpu(p.v->idx) + offset;
1162 struct reflink_gc *ref;
1164 unsigned front_frag, back_frag;
1170 BUG_ON(offset + sectors > p.k->size);
1172 front_frag = offset;
1173 back_frag = offset + sectors;
1176 r = c->reflink_gc_nr;
1178 m = l + (r - l) / 2;
1180 ref = genradix_ptr(&c->reflink_gc_table, m);
1181 if (ref->offset <= idx)
1188 ret = __bch2_mark_reflink_p(c, p, idx, sectors,
1189 front_frag, back_frag, flags, &l);
1200 static int bch2_mark_key_locked(struct bch_fs *c,
1201 struct bkey_s_c old,
1202 struct bkey_s_c new,
1203 unsigned offset, s64 sectors,
1204 struct bch_fs_usage *fs_usage,
1205 u64 journal_seq, unsigned flags)
1207 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1210 BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1214 if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1215 fs_usage = fs_usage_ptr(c, journal_seq,
1216 flags & BTREE_TRIGGER_GC);
1218 switch (k.k->type) {
1219 case KEY_TYPE_alloc:
1220 case KEY_TYPE_alloc_v2:
1221 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1223 case KEY_TYPE_btree_ptr:
1224 case KEY_TYPE_btree_ptr_v2:
1225 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1226 ? c->opts.btree_node_size
1227 : -c->opts.btree_node_size;
1229 ret = bch2_mark_extent(c, old, new, offset, sectors,
1230 BCH_DATA_btree, fs_usage, journal_seq, flags);
1232 case KEY_TYPE_extent:
1233 case KEY_TYPE_reflink_v:
1234 ret = bch2_mark_extent(c, old, new, offset, sectors,
1235 BCH_DATA_user, fs_usage, journal_seq, flags);
1237 case KEY_TYPE_stripe:
1238 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1240 case KEY_TYPE_inode:
1241 fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
1242 fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
1244 case KEY_TYPE_reservation: {
1245 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1247 sectors *= replicas;
1248 replicas = clamp_t(unsigned, replicas, 1,
1249 ARRAY_SIZE(fs_usage->persistent_reserved));
1251 fs_usage->reserved += sectors;
1252 fs_usage->persistent_reserved[replicas - 1] += sectors;
1255 case KEY_TYPE_reflink_p:
1256 ret = bch2_mark_reflink_p(c, bkey_s_c_to_reflink_p(k),
1257 offset, sectors, flags);
1266 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1267 unsigned offset, s64 sectors,
1268 struct bch_fs_usage *fs_usage,
1269 u64 journal_seq, unsigned flags)
1271 struct bkey deleted;
1272 struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1275 bkey_init(&deleted);
1277 percpu_down_read(&c->mark_lock);
1278 ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1279 fs_usage, journal_seq,
1280 BTREE_TRIGGER_INSERT|flags);
1281 percpu_up_read(&c->mark_lock);
1286 int bch2_mark_update(struct btree_trans *trans,
1287 struct btree_iter *iter,
1289 struct bch_fs_usage *fs_usage,
1292 struct bch_fs *c = trans->c;
1293 struct bkey_s_c old;
1294 struct bkey unpacked;
1297 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1300 if (!btree_node_type_needs_gc(iter->btree_id))
1303 bkey_init(&unpacked);
1304 old = (struct bkey_s_c) { &unpacked, NULL };
1306 if (!btree_node_type_is_extents(iter->btree_id)) {
1307 /* iterators should be uptodate, shouldn't get errors here: */
1308 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1309 old = bch2_btree_iter_peek_slot(iter);
1310 BUG_ON(bkey_err(old));
1312 struct bkey_cached *ck = (void *) iter->l[0].b;
1315 old = bkey_i_to_s_c(ck->k);
1318 if (old.k->type == new->k.type) {
1319 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1320 fs_usage, trans->journal_res.seq,
1321 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1324 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1325 fs_usage, trans->journal_res.seq,
1326 BTREE_TRIGGER_INSERT|flags);
1327 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1328 fs_usage, trans->journal_res.seq,
1329 BTREE_TRIGGER_OVERWRITE|flags);
1332 struct btree_iter *copy;
1334 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1335 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1337 fs_usage, trans->journal_res.seq,
1338 BTREE_TRIGGER_INSERT|flags);
1340 copy = bch2_trans_copy_iter(trans, iter);
1342 for_each_btree_key_continue(copy, 0, old, ret) {
1343 unsigned offset = 0;
1344 s64 sectors = -((s64) old.k->size);
1346 flags |= BTREE_TRIGGER_OVERWRITE;
1348 if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1351 switch (bch2_extent_overlap(&new->k, old.k)) {
1352 case BCH_EXTENT_OVERLAP_ALL:
1354 sectors = -((s64) old.k->size);
1356 case BCH_EXTENT_OVERLAP_BACK:
1357 offset = bkey_start_offset(&new->k) -
1358 bkey_start_offset(old.k);
1359 sectors = bkey_start_offset(&new->k) -
1362 case BCH_EXTENT_OVERLAP_FRONT:
1364 sectors = bkey_start_offset(old.k) -
1367 case BCH_EXTENT_OVERLAP_MIDDLE:
1368 offset = bkey_start_offset(&new->k) -
1369 bkey_start_offset(old.k);
1370 sectors = -((s64) new->k.size);
1371 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1375 BUG_ON(sectors >= 0);
1377 ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1378 offset, sectors, fs_usage,
1379 trans->journal_res.seq, flags) ?: 1;
1383 bch2_trans_iter_put(trans, copy);
1389 static noinline __cold
1390 void fs_usage_apply_warn(struct btree_trans *trans,
1391 unsigned disk_res_sectors,
1392 s64 should_not_have_added)
1394 struct bch_fs *c = trans->c;
1395 struct btree_insert_entry *i;
1398 bch_err(c, "disk usage increased %lli more than %u sectors reserved",
1399 should_not_have_added, disk_res_sectors);
1401 trans_for_each_update(trans, i) {
1402 pr_err("while inserting");
1403 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1405 pr_err("overlapping with");
1407 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1408 struct btree_iter *copy = bch2_trans_copy_iter(trans, i->iter);
1412 for_each_btree_key_continue(copy, 0, k, ret) {
1413 if (btree_node_type_is_extents(i->iter->btree_id)
1414 ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1415 : bkey_cmp(i->k->k.p, k.k->p))
1418 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1421 bch2_trans_iter_put(trans, copy);
1423 struct bkey_cached *ck = (void *) i->iter->l[0].b;
1426 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
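/*
 * Apply the replicas deltas accumulated by a transaction to the filesystem
 * usage counters for the transaction's journal sequence number, and verify the
 * increase was covered by the caller's disk reservation (warning once if not).
 */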
1434 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1435 struct replicas_delta_list *deltas)
1437 struct bch_fs *c = trans->c;
1438 static int warned_disk_usage = 0;
1440 unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1441 struct replicas_delta *d = deltas->d;
1442 struct replicas_delta *top = (void *) deltas->d + deltas->used;
1443 struct bch_fs_usage *dst;
1444 s64 added = 0, should_not_have_added;
1447 percpu_rwsem_assert_held(&c->mark_lock);
1450 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1452 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1453 switch (d->r.data_type) {
1454 case BCH_DATA_btree:
1456 case BCH_DATA_parity:
1460 BUG_ON(update_replicas(c, dst, &d->r, d->delta));
1463 dst->nr_inodes += deltas->nr_inodes;
1465 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1466 added += deltas->persistent_reserved[i];
1467 dst->reserved += deltas->persistent_reserved[i];
1468 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
1472 * Not allowed to reduce sectors_available except by getting a
1475 should_not_have_added = added - (s64) disk_res_sectors;
1476 if (unlikely(should_not_have_added > 0)) {
1477 atomic64_sub(should_not_have_added, &c->sectors_available);
1478 added -= should_not_have_added;
1483 trans->disk_res->sectors -= added;
1484 this_cpu_sub(*c->online_reserved, added);
1489 if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1490 fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
1495 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1496 enum btree_id btree_id, struct bpos pos,
1499 struct btree_insert_entry *i;
1501 trans_for_each_update(trans, i)
1502 if (i->iter->btree_id == btree_id &&
1503 (btree_node_type_is_extents(btree_id)
1504 ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1505 bkey_cmp(pos, i->k->k.p) < 0
1506 : !bkey_cmp(pos, i->iter->pos))) {
1507 *k = bkey_i_to_s_c(i->k);
1510 BUG_ON(btree_iter_live(trans, i->iter));
1511 trans->iters_live |= 1ULL << i->iter->idx;
1518 static int trans_get_key(struct btree_trans *trans,
1519 enum btree_id btree_id, struct bpos pos,
1520 struct btree_iter **iter,
1523 unsigned flags = btree_id != BTREE_ID_alloc
1525 : BTREE_ITER_CACHED;
1528 *iter = trans_get_update(trans, btree_id, pos, k);
1532 *iter = bch2_trans_get_iter(trans, btree_id, pos,
1533 flags|BTREE_ITER_INTENT);
1534 *k = __bch2_btree_iter_peek(*iter, flags);
1537 bch2_trans_iter_put(trans, *iter);
1541 static struct bkey_alloc_buf *
1542 bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
1543 const struct bch_extent_ptr *ptr,
1544 struct bkey_alloc_unpacked *u)
1546 struct bch_fs *c = trans->c;
1547 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1548 struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1550 struct btree_iter *iter;
1552 struct bkey_alloc_buf *a;
1555 a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
1559 iter = trans_get_update(trans, BTREE_ID_alloc, pos, &k);
1561 *u = bch2_alloc_unpack(k);
1563 iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, pos,
1565 BTREE_ITER_CACHED_NOFILL|
1567 ret = bch2_btree_iter_traverse(iter);
1569 bch2_trans_iter_put(trans, iter);
1570 return ERR_PTR(ret);
1573 percpu_down_read(&c->mark_lock);
1574 g = bucket(ca, pos.offset);
1575 *u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
1576 percpu_up_read(&c->mark_lock);
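/*
 * Transactional counterpart of the pointer marking above: read the bucket's
 * alloc key, apply __mark_pointer() to the unpacked fields, and queue an
 * update of the alloc key within the transaction.
 */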
1583 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1584 struct bkey_s_c k, struct extent_ptr_decoded p,
1585 s64 sectors, enum bch_data_type data_type)
1587 struct bch_fs *c = trans->c;
1588 struct btree_iter *iter;
1589 struct bkey_alloc_unpacked u;
1590 struct bkey_alloc_buf *a;
1593 a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1597 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
1598 &u.dirty_sectors, &u.cached_sectors);
1602 bch2_alloc_pack(c, a, u);
1603 bch2_trans_update(trans, iter, &a->k, 0);
1605 bch2_trans_iter_put(trans, iter);
1609 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1610 struct extent_ptr_decoded p,
1611 s64 sectors, enum bch_data_type data_type)
1613 struct bch_fs *c = trans->c;
1614 struct btree_iter *iter;
1616 struct bkey_i_stripe *s;
1617 struct bch_replicas_padded r;
1620 ret = trans_get_key(trans, BTREE_ID_stripes, POS(0, p.ec.idx), &iter, &k);
1624 if (k.k->type != KEY_TYPE_stripe) {
1625 bch2_fs_inconsistent(c,
1626 "pointer to nonexistent stripe %llu",
1628 bch2_inconsistent_error(c);
1633 if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1634 bch2_fs_inconsistent(c,
1635 "stripe pointer doesn't match stripe %llu",
1641 s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1642 ret = PTR_ERR_OR_ZERO(s);
1646 bkey_reassemble(&s->k_i, k);
1647 stripe_blockcount_set(&s->v, p.ec.block,
1648 stripe_blockcount_get(&s->v, p.ec.block) +
1650 bch2_trans_update(trans, iter, &s->k_i, 0);
1652 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1653 r.e.data_type = data_type;
1654 update_replicas_list(trans, &r.e, sectors);
1656 bch2_trans_iter_put(trans, iter);
1660 static int bch2_trans_mark_extent(struct btree_trans *trans,
1661 struct bkey_s_c k, unsigned offset,
1662 s64 sectors, unsigned flags,
1663 enum bch_data_type data_type)
1665 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1666 const union bch_extent_entry *entry;
1667 struct extent_ptr_decoded p;
1668 struct bch_replicas_padded r;
1669 s64 dirty_sectors = 0;
1673 r.e.data_type = data_type;
1675 r.e.nr_required = 1;
1679 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1680 s64 disk_sectors = data_type == BCH_DATA_btree
1682 : ptr_disk_sectors_delta(p, offset, sectors, flags);
1684 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1693 update_cached_sectors_list(trans, p.ptr.dev,
1695 } else if (!p.has_ec) {
1696 dirty_sectors += disk_sectors;
1697 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1699 ret = bch2_trans_mark_stripe_ptr(trans, p,
1700 disk_sectors, data_type);
1704 r.e.nr_required = 0;
1709 update_replicas_list(trans, &r.e, dirty_sectors);
1714 static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
1715 struct bkey_s_c_stripe s,
1716 unsigned idx, bool deleting)
1718 struct bch_fs *c = trans->c;
1719 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1720 struct bkey_alloc_buf *a;
1721 struct btree_iter *iter;
1722 struct bkey_alloc_unpacked u;
1723 bool parity = idx >= s.v->nr_blocks - s.v->nr_redundant;
1726 a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1731 s64 sectors = le16_to_cpu(s.v->sectors);
1736 u.dirty_sectors += sectors;
1737 u.data_type = u.dirty_sectors
1743 if (bch2_fs_inconsistent_on(u.stripe && u.stripe != s.k->p.offset, c,
1744 "bucket %llu:%llu gen %u: multiple stripes using same bucket (%u, %llu)",
1745 iter->pos.inode, iter->pos.offset, u.gen,
1746 u.stripe, s.k->p.offset)) {
1751 u.stripe = s.k->p.offset;
1752 u.stripe_redundancy = s.v->nr_redundant;
1755 u.stripe_redundancy = 0;
1758 bch2_alloc_pack(c, a, u);
1759 bch2_trans_update(trans, iter, &a->k, 0);
1761 bch2_trans_iter_put(trans, iter);
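/*
 * Transactional trigger for stripe keys: if the set of pointers changed,
 * credit the new stripe's parity sectors and mark each of its buckets, then
 * debit and unmark the old stripe's buckets.
 */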
1765 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1766 struct bkey_s_c old, struct bkey_s_c new,
1769 struct bkey_s_c_stripe old_s = { .k = NULL };
1770 struct bkey_s_c_stripe new_s = { .k = NULL };
1771 struct bch_replicas_padded r;
1775 if (old.k->type == KEY_TYPE_stripe)
1776 old_s = bkey_s_c_to_stripe(old);
1777 if (new.k->type == KEY_TYPE_stripe)
1778 new_s = bkey_s_c_to_stripe(new);
1781 * If the pointers aren't changing, we don't need to do anything:
1783 if (new_s.k && old_s.k &&
1784 new_s.v->nr_blocks == old_s.v->nr_blocks &&
1785 new_s.v->nr_redundant == old_s.v->nr_redundant &&
1786 !memcmp(old_s.v->ptrs, new_s.v->ptrs,
1787 new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
1791 s64 sectors = le16_to_cpu(new_s.v->sectors);
1793 bch2_bkey_to_replicas(&r.e, new);
1794 update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
1796 for (i = 0; i < new_s.v->nr_blocks; i++) {
1797 ret = bch2_trans_mark_stripe_alloc_ref(trans, new_s,
1805 s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
1807 bch2_bkey_to_replicas(&r.e, old);
1808 update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
1810 for (i = 0; i < old_s.v->nr_blocks; i++) {
1811 ret = bch2_trans_mark_stripe_alloc_ref(trans, old_s,
1821 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1822 struct bkey_s_c_reflink_p p,
1823 u64 idx, unsigned sectors,
1824 unsigned front_frag,
1828 struct bch_fs *c = trans->c;
1829 struct btree_iter *iter;
1833 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1834 int frags_referenced;
1837 ret = trans_get_key(trans, BTREE_ID_reflink,
1838 POS(0, idx), &iter, &k);
1842 sectors = min_t(u64, sectors, k.k->p.offset - idx);
1845 reflink_p_frag_references(p, 0, front_frag, k) +
1846 reflink_p_frag_references(p, back_frag, p.k->size, k);
1848 if (frags_referenced == 2) {
1849 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE_SPLIT));
1851 } else if (frags_referenced == 1) {
1852 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE));
1856 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1857 ret = PTR_ERR_OR_ZERO(n);
1861 bkey_reassemble(n, k);
1863 refcount = bkey_refcount(n);
1865 bch2_fs_inconsistent(c,
1866 "%llu:%llu len %u points to nonexistent indirect extent %llu",
1867 p.k->p.inode, p.k->p.offset, p.k->size, idx);
1868 bch2_inconsistent_error(c);
1873 BUG_ON(!*refcount && (flags & BTREE_TRIGGER_OVERWRITE));
1874 le64_add_cpu(refcount, add);
1877 n->k.type = KEY_TYPE_deleted;
1878 set_bkey_val_u64s(&n->k, 0);
1881 bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1882 ret = bch2_trans_update(trans, iter, n, 0);
1888 bch2_trans_iter_put(trans, iter);
1892 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1893 struct bkey_s_c_reflink_p p, unsigned offset,
1894 s64 sectors, unsigned flags)
1896 u64 idx = le64_to_cpu(p.v->idx) + offset;
1897 unsigned front_frag, back_frag;
1903 BUG_ON(offset + sectors > p.k->size);
1905 front_frag = offset;
1906 back_frag = offset + sectors;
1909 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors,
1910 front_frag, back_frag, flags);
1921 int bch2_trans_mark_key(struct btree_trans *trans,
1922 struct bkey_s_c old,
1923 struct bkey_s_c new,
1924 unsigned offset, s64 sectors, unsigned flags)
1926 struct bch_fs *c = trans->c;
1927 struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1928 struct replicas_delta_list *d;
1930 BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1932 switch (k.k->type) {
1933 case KEY_TYPE_btree_ptr:
1934 case KEY_TYPE_btree_ptr_v2:
1935 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1936 ? c->opts.btree_node_size
1937 : -c->opts.btree_node_size;
1939 return bch2_trans_mark_extent(trans, k, offset, sectors,
1940 flags, BCH_DATA_btree);
1941 case KEY_TYPE_extent:
1942 case KEY_TYPE_reflink_v:
1943 return bch2_trans_mark_extent(trans, k, offset, sectors,
1944 flags, BCH_DATA_user);
1945 case KEY_TYPE_stripe:
1946 return bch2_trans_mark_stripe(trans, old, new, flags);
1947 case KEY_TYPE_inode: {
1948 int nr = (new.k->type == KEY_TYPE_inode) -
1949 (old.k->type == KEY_TYPE_inode);
1952 d = replicas_deltas_realloc(trans, 0);
1958 case KEY_TYPE_reservation: {
1959 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1961 d = replicas_deltas_realloc(trans, 0);
1963 sectors *= replicas;
1964 replicas = clamp_t(unsigned, replicas, 1,
1965 ARRAY_SIZE(d->persistent_reserved));
1967 d->persistent_reserved[replicas - 1] += sectors;
1970 case KEY_TYPE_reflink_p:
1971 return bch2_trans_mark_reflink_p(trans,
1972 bkey_s_c_to_reflink_p(k),
1973 offset, sectors, flags);
1979 int bch2_trans_mark_update(struct btree_trans *trans,
1980 struct btree_iter *iter,
1984 struct bkey_s_c old;
1987 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1990 if (!btree_node_type_needs_gc(iter->btree_id))
1993 if (!btree_node_type_is_extents(iter->btree_id)) {
1994 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1995 old = bch2_btree_iter_peek_slot(iter);
1996 ret = bkey_err(old);
2000 struct bkey_cached *ck = (void *) iter->l[0].b;
2003 old = bkey_i_to_s_c(ck->k);
2006 if (old.k->type == new->k.type) {
2007 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2008 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
2010 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2011 BTREE_TRIGGER_INSERT|flags) ?:
2012 bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2013 BTREE_TRIGGER_OVERWRITE|flags);
2016 struct btree_iter *copy;
2019 EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
2022 old = (struct bkey_s_c) { &_old, NULL };
2024 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2026 BTREE_TRIGGER_INSERT);
2030 copy = bch2_trans_copy_iter(trans, iter);
2032 for_each_btree_key_continue(copy, 0, old, ret) {
2033 unsigned offset = 0;
2034 s64 sectors = -((s64) old.k->size);
2036 flags |= BTREE_TRIGGER_OVERWRITE;
2038 if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
2041 switch (bch2_extent_overlap(&new->k, old.k)) {
2042 case BCH_EXTENT_OVERLAP_ALL:
2044 sectors = -((s64) old.k->size);
2046 case BCH_EXTENT_OVERLAP_BACK:
2047 offset = bkey_start_offset(&new->k) -
2048 bkey_start_offset(old.k);
2049 sectors = bkey_start_offset(&new->k) -
2052 case BCH_EXTENT_OVERLAP_FRONT:
2054 sectors = bkey_start_offset(old.k) -
2057 case BCH_EXTENT_OVERLAP_MIDDLE:
2058 offset = bkey_start_offset(&new->k) -
2059 bkey_start_offset(old.k);
2060 sectors = -((s64) new->k.size);
2061 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
2065 BUG_ON(sectors >= 0);
2067 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2068 offset, sectors, flags);
2072 bch2_trans_iter_put(trans, copy);
2078 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2079 struct bch_dev *ca, size_t b,
2080 enum bch_data_type type,
2083 struct bch_fs *c = trans->c;
2084 struct btree_iter *iter;
2085 struct bkey_alloc_unpacked u;
2086 struct bkey_alloc_buf *a;
2087 struct bch_extent_ptr ptr = {
2089 .offset = bucket_to_sector(ca, b),
2094 * Backup superblock might be past the end of our normal usable space:
2096 if (b >= ca->mi.nbuckets)
2099 a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
2103 if (u.data_type && u.data_type != type) {
2104 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
2105 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
2107 iter->pos.inode, iter->pos.offset, u.gen,
2108 bch2_data_types[u.data_type],
2109 bch2_data_types[type],
2110 bch2_data_types[type]);
2116 u.dirty_sectors = sectors;
2118 bch2_alloc_pack(c, a, u);
2119 bch2_trans_update(trans, iter, &a->k, 0);
2121 bch2_trans_iter_put(trans, iter);
2125 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2126 struct bch_dev *ca, size_t b,
2127 enum bch_data_type type,
2130 return __bch2_trans_do(trans, NULL, NULL, 0,
2131 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
2134 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
2137 enum bch_data_type type,
2138 u64 *bucket, unsigned *bucket_sectors)
2141 u64 b = sector_to_bucket(ca, start);
2143 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
2145 if (b != *bucket && *bucket_sectors) {
2146 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
2147 type, *bucket_sectors);
2151 *bucket_sectors = 0;
2155 *bucket_sectors += sectors;
2157 } while (start < end);
2162 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
2165 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
2167 unsigned i, bucket_sectors = 0;
2170 for (i = 0; i < layout->nr_superblocks; i++) {
2171 u64 offset = le64_to_cpu(layout->sb_offset[i]);
2173 if (offset == BCH_SB_SECTOR) {
2174 ret = bch2_trans_mark_metadata_sectors(trans, ca,
2176 BCH_DATA_sb, &bucket, &bucket_sectors);
2181 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
2182 offset + (1 << layout->sb_max_size_bits),
2183 BCH_DATA_sb, &bucket, &bucket_sectors);
2188 if (bucket_sectors) {
2189 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2190 bucket, BCH_DATA_sb, bucket_sectors);
2195 for (i = 0; i < ca->journal.nr; i++) {
2196 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2197 ca->journal.buckets[i],
2198 BCH_DATA_journal, ca->mi.bucket_size);
2206 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
2208 return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
2209 __bch2_trans_mark_dev_sb(&trans, ca));
2212 /* Disk reservations: */
2214 #define SECTORS_CACHE 1024
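/*
 * Take a disk reservation of @sectors: the fast path consumes from a per-cpu
 * cache refilled in SECTORS_CACHE chunks from c->sectors_available; the slow
 * path recomputes sectors_available under sectors_available_lock.
 *
 * Usage sketch (not from this file, assuming the usual pairing with
 * bch2_disk_reservation_put() from buckets.h):
 *
 *	struct disk_reservation res = { 0 };
 *
 *	if (!bch2_disk_reservation_add(c, &res, sectors, 0)) {
 *		... perform the update covered by the reservation ...
 *		bch2_disk_reservation_put(c, &res);
 *	}
 */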
2216 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2217 u64 sectors, int flags)
2219 struct bch_fs_pcpu *pcpu;
2221 s64 sectors_available;
2224 percpu_down_read(&c->mark_lock);
2226 pcpu = this_cpu_ptr(c->pcpu);
2228 if (sectors <= pcpu->sectors_available)
2231 v = atomic64_read(&c->sectors_available);
2234 get = min((u64) sectors + SECTORS_CACHE, old);
2236 if (get < sectors) {
2240 } while ((v = atomic64_cmpxchg(&c->sectors_available,
2241 old, old - get)) != old);
2243 pcpu->sectors_available += get;
2246 pcpu->sectors_available -= sectors;
2247 this_cpu_add(*c->online_reserved, sectors);
2248 res->sectors += sectors;
2251 percpu_up_read(&c->mark_lock);
2255 mutex_lock(&c->sectors_available_lock);
2257 percpu_u64_set(&c->pcpu->sectors_available, 0);
2258 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2260 if (sectors <= sectors_available ||
2261 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2262 atomic64_set(&c->sectors_available,
2263 max_t(s64, 0, sectors_available - sectors));
2264 this_cpu_add(*c->online_reserved, sectors);
2265 res->sectors += sectors;
2268 atomic64_set(&c->sectors_available, sectors_available);
2272 mutex_unlock(&c->sectors_available_lock);
2273 percpu_up_read(&c->mark_lock);
2278 /* Startup/shutdown: */
2280 static void buckets_free_rcu(struct rcu_head *rcu)
2282 struct bucket_array *buckets =
2283 container_of(rcu, struct bucket_array, rcu);
2286 sizeof(struct bucket_array) +
2287 buckets->nbuckets * sizeof(struct bucket));
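/*
 * Resize a device's bucket array and allocator freelists: allocate the new
 * arrays, copy the old contents under the gc, bucket and mark locks, swap the
 * pointers, and free the old bucket array via RCU (buckets_free_rcu()).
 */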
2290 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2292 struct bucket_array *buckets = NULL, *old_buckets = NULL;
2293 unsigned long *buckets_nouse = NULL;
2294 alloc_fifo free[RESERVE_NR];
2295 alloc_fifo free_inc;
2296 alloc_heap alloc_heap;
2298 size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2299 ca->mi.bucket_size / c->opts.btree_node_size);
2300 /* XXX: these should be tunable */
2301 size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
2302 size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 6);
2303 size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
2305 bool resize = ca->buckets[0] != NULL;
2309 memset(&free, 0, sizeof(free));
2310 memset(&free_inc, 0, sizeof(free_inc));
2311 memset(&alloc_heap, 0, sizeof(alloc_heap));
2313 if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
2314 nbuckets * sizeof(struct bucket),
2315 GFP_KERNEL|__GFP_ZERO)) ||
2316 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2317 sizeof(unsigned long),
2318 GFP_KERNEL|__GFP_ZERO)) ||
2319 !init_fifo(&free[RESERVE_MOVINGGC],
2320 copygc_reserve, GFP_KERNEL) ||
2321 !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2322 !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
2323 !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2326 buckets->first_bucket = ca->mi.first_bucket;
2327 buckets->nbuckets = nbuckets;
2329 bch2_copygc_stop(c);
2332 down_write(&c->gc_lock);
2333 down_write(&ca->bucket_lock);
2334 percpu_down_write(&c->mark_lock);
2337 old_buckets = bucket_array(ca);
2340 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2344 n * sizeof(struct bucket));
2345 memcpy(buckets_nouse,
2347 BITS_TO_LONGS(n) * sizeof(unsigned long));
2350 rcu_assign_pointer(ca->buckets[0], buckets);
2351 buckets = old_buckets;
2353 swap(ca->buckets_nouse, buckets_nouse);
2356 percpu_up_write(&c->mark_lock);
2357 up_write(&c->gc_lock);
2360 spin_lock(&c->freelist_lock);
2361 for (i = 0; i < RESERVE_NR; i++) {
2362 fifo_move(&free[i], &ca->free[i]);
2363 swap(ca->free[i], free[i]);
2365 fifo_move(&free_inc, &ca->free_inc);
2366 swap(ca->free_inc, free_inc);
2367 spin_unlock(&c->freelist_lock);
2369 /* with gc lock held, alloc_heap can't be in use: */
2370 swap(ca->alloc_heap, alloc_heap);
2372 nbuckets = ca->mi.nbuckets;
2375 up_write(&ca->bucket_lock);
2379 free_heap(&alloc_heap);
2380 free_fifo(&free_inc);
2381 for (i = 0; i < RESERVE_NR; i++)
2382 free_fifo(&free[i]);
2383 kvpfree(buckets_nouse,
2384 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2386 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2391 void bch2_dev_buckets_free(struct bch_dev *ca)
2395 free_heap(&ca->alloc_heap);
2396 free_fifo(&ca->free_inc);
2397 for (i = 0; i < RESERVE_NR; i++)
2398 free_fifo(&ca->free[i]);
2399 kvpfree(ca->buckets_nouse,
2400 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2401 kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2402 sizeof(struct bucket_array) +
2403 ca->mi.nbuckets * sizeof(struct bucket));
2405 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2406 free_percpu(ca->usage[i]);
2407 kfree(ca->usage_base);
2410 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2414 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2415 if (!ca->usage_base)
2418 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2419 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2424 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);