1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
9 #include "alloc_background.h"
10 #include "backpointers.h"
13 #include "btree_update.h"
15 #include "buckets_waiting_for_journal.h"
23 #include "subvolume.h"
26 #include <linux/preempt.h>
28 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
29 enum bch_data_type data_type,
34 fs_usage->btree += sectors;
38 fs_usage->data += sectors;
41 fs_usage->cached += sectors;
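/*
 * Recompute the base filesystem usage counters at startup: fold the percpu
 * accumulators into usage_base, then rederive the reserved total from
 * persistent_reserved, the data/btree/cached totals from the replicas
 * counters, and the hidden total from per-device sb/journal buckets.
 */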
48 void bch2_fs_usage_initialize(struct bch_fs *c)
50 struct bch_fs_usage *usage;
54 percpu_down_write(&c->mark_lock);
55 usage = c->usage_base;
57 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
58 bch2_fs_usage_acc_to_base(c, i);
60 for (i = 0; i < BCH_REPLICAS_MAX; i++)
61 usage->reserved += usage->persistent_reserved[i];
63 for (i = 0; i < c->replicas.nr; i++) {
64 struct bch_replicas_entry *e =
65 cpu_replicas_entry(&c->replicas, i);
67 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
70 for_each_member_device(ca, c, i) {
71 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
73 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
74 dev.d[BCH_DATA_journal].buckets) *
78 percpu_up_write(&c->mark_lock);
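/*
 * Return the percpu device usage counters to update: a separate gc set when
 * called from gc, otherwise the set corresponding to the journal buffer this
 * update will be written in.
 */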
81 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
85 BUG_ON(!gc && !journal_seq);
87 return this_cpu_ptr(gc
89 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
92 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
94 struct bch_fs *c = ca->fs;
95 unsigned seq, i, u64s = dev_usage_u64s();
98 seq = read_seqcount_begin(&c->usage_lock);
99 memcpy(usage, ca->usage_base, u64s * sizeof(u64));
100 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
101 acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
102 } while (read_seqcount_retry(&c->usage_lock, seq));
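/* As dev_usage_ptr(), but for the filesystem-wide usage counters: */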
105 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
106 unsigned journal_seq,
109 percpu_rwsem_assert_held(&c->mark_lock);
110 BUG_ON(!gc && !journal_seq);
112 return this_cpu_ptr(gc
114 : c->usage[journal_seq & JOURNAL_BUF_MASK]);
117 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
119 ssize_t offset = v - (u64 *) c->usage_base;
123 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
124 percpu_rwsem_assert_held(&c->mark_lock);
127 seq = read_seqcount_begin(&c->usage_lock);
130 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
131 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
132 } while (read_seqcount_retry(&c->usage_lock, seq));
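/*
 * Return a heap-allocated snapshot of filesystem usage: the base counters
 * plus all percpu deltas, read consistently under the usage_lock seqcount.
 */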
137 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
139 struct bch_fs_usage_online *ret;
140 unsigned nr_replicas = READ_ONCE(c->replicas.nr);
143 ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
147 percpu_down_read(&c->mark_lock);
149 if (nr_replicas != c->replicas.nr) {
150 nr_replicas = c->replicas.nr;
151 percpu_up_read(&c->mark_lock);
156 ret->online_reserved = percpu_u64_get(c->online_reserved);
159 seq = read_seqcount_begin(&c->usage_lock);
160 unsafe_memcpy(&ret->u, c->usage_base,
161 __fs_usage_u64s(nr_replicas) * sizeof(u64),
162 "embedded variable length struct");
163 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
164 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
165 __fs_usage_u64s(nr_replicas));
166 } while (read_seqcount_retry(&c->usage_lock, seq));
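/*
 * Fold one set of percpu accumulators (c->usage[idx] and every device's
 * usage[idx]) into the base counters and zero it, under the usage_lock write
 * seqcount.
 */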
171 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
174 unsigned i, u64s = fs_usage_u64s(c);
176 BUG_ON(idx >= ARRAY_SIZE(c->usage));
179 write_seqcount_begin(&c->usage_lock);
181 acc_u64s_percpu((u64 *) c->usage_base,
182 (u64 __percpu *) c->usage[idx], u64s);
183 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
186 for_each_member_device_rcu(ca, c, i, NULL) {
187 u64s = dev_usage_u64s();
189 acc_u64s_percpu((u64 *) ca->usage_base,
190 (u64 __percpu *) ca->usage[idx], u64s);
191 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
195 write_seqcount_end(&c->usage_lock);
199 void bch2_fs_usage_to_text(struct printbuf *out,
201 struct bch_fs_usage_online *fs_usage)
205 prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
207 prt_printf(out, "hidden:\t\t\t\t%llu\n",
209 prt_printf(out, "data:\t\t\t\t%llu\n",
211 prt_printf(out, "cached:\t\t\t\t%llu\n",
213 prt_printf(out, "reserved:\t\t\t%llu\n",
214 fs_usage->u.reserved);
215 prt_printf(out, "nr_inodes:\t\t\t%llu\n",
216 fs_usage->u.nr_inodes);
217 prt_printf(out, "online reserved:\t\t%llu\n",
218 fs_usage->online_reserved);
221 i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
223 prt_printf(out, "%u replicas:\n", i + 1);
224 prt_printf(out, "\treserved:\t\t%llu\n",
225 fs_usage->u.persistent_reserved[i]);
228 for (i = 0; i < c->replicas.nr; i++) {
229 struct bch_replicas_entry *e =
230 cpu_replicas_entry(&c->replicas, i);
232 prt_printf(out, "\t");
233 bch2_replicas_entry_to_text(out, e);
234 prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
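/*
 * Pad a reservation by roughly r / 2^RESERVE_FACTOR, rounded up: used when
 * converting reserved sectors into the sectors-used estimate, so reservations
 * are counted conservatively.
 */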
238 static u64 reserve_factor(u64 r)
240 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
243 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
245 return min(fs_usage->u.hidden +
248 reserve_factor(fs_usage->u.reserved +
249 fs_usage->online_reserved),
253 static struct bch_fs_usage_short
254 __bch2_fs_usage_read_short(struct bch_fs *c)
256 struct bch_fs_usage_short ret;
259 ret.capacity = c->capacity -
260 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
262 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
263 bch2_fs_usage_read_one(c, &c->usage_base->btree);
264 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
265 percpu_u64_get(c->online_reserved);
267 ret.used = min(ret.capacity, data + reserve_factor(reserved));
268 ret.free = ret.capacity - ret.used;
270 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
275 struct bch_fs_usage_short
276 bch2_fs_usage_read_short(struct bch_fs *c)
278 struct bch_fs_usage_short ret;
280 percpu_down_read(&c->mark_lock);
281 ret = __bch2_fs_usage_read_short(c);
282 percpu_up_read(&c->mark_lock);
287 void bch2_dev_usage_init(struct bch_dev *ca)
289 ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
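/*
 * Sectors lost to internal fragmentation: for a bucket with any dirty data,
 * the portion of the bucket not covered by dirty sectors.
 */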
292 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
293 struct bch_alloc_v4 a)
295 return a.dirty_sectors
296 ? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
300 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
301 struct bch_alloc_v4 old,
302 struct bch_alloc_v4 new,
303 u64 journal_seq, bool gc)
305 struct bch_fs_usage *fs_usage;
306 struct bch_dev_usage *u;
309 fs_usage = fs_usage_ptr(c, journal_seq, gc);
311 if (data_type_is_hidden(old.data_type))
312 fs_usage->hidden -= ca->mi.bucket_size;
313 if (data_type_is_hidden(new.data_type))
314 fs_usage->hidden += ca->mi.bucket_size;
316 u = dev_usage_ptr(ca, journal_seq, gc);
318 u->d[old.data_type].buckets--;
319 u->d[new.data_type].buckets++;
321 u->buckets_ec -= (int) !!old.stripe;
322 u->buckets_ec += (int) !!new.stripe;
324 u->d[old.data_type].sectors -= old.dirty_sectors;
325 u->d[new.data_type].sectors += new.dirty_sectors;
327 u->d[BCH_DATA_cached].sectors += new.cached_sectors;
328 u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
330 u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
331 u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
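/*
 * As bch2_dev_usage_update(), but taking the in-memory (gc) struct bucket
 * representation instead of bch_alloc_v4:
 */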
336 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
337 struct bucket old, struct bucket new,
338 u64 journal_seq, bool gc)
340 struct bch_alloc_v4 old_a = {
342 .data_type = old.data_type,
343 .dirty_sectors = old.dirty_sectors,
344 .cached_sectors = old.cached_sectors,
345 .stripe = old.stripe,
347 struct bch_alloc_v4 new_a = {
349 .data_type = new.data_type,
350 .dirty_sectors = new.dirty_sectors,
351 .cached_sectors = new.cached_sectors,
352 .stripe = new.stripe,
355 bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
358 static inline int __update_replicas(struct bch_fs *c,
359 struct bch_fs_usage *fs_usage,
360 struct bch_replicas_entry *r,
363 int idx = bch2_replicas_entry_idx(c, r);
368 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
369 fs_usage->replicas[idx] += sectors;
373 static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
374 struct bch_replicas_entry *r, s64 sectors,
375 unsigned journal_seq, bool gc)
377 struct bch_fs_usage *fs_usage;
379 struct printbuf buf = PRINTBUF;
381 percpu_down_read(&c->mark_lock);
384 idx = bch2_replicas_entry_idx(c, r);
386 fsck_err(c, "no replicas entry\n"
388 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
389 percpu_up_read(&c->mark_lock);
390 ret = bch2_mark_replicas(c, r);
391 percpu_down_read(&c->mark_lock);
395 idx = bch2_replicas_entry_idx(c, r);
403 fs_usage = fs_usage_ptr(c, journal_seq, gc);
404 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
405 fs_usage->replicas[idx] += sectors;
409 percpu_up_read(&c->mark_lock);
414 static inline int update_cached_sectors(struct bch_fs *c,
416 unsigned dev, s64 sectors,
417 unsigned journal_seq, bool gc)
419 struct bch_replicas_padded r;
421 bch2_replicas_entry_cached(&r.e, dev);
423 return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
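/*
 * Ensure trans->fs_usage_deltas has room for @more more bytes: the list grows
 * geometrically via krealloc, falling back to a fixed-size mempool allocation
 * once it would exceed REPLICAS_DELTA_LIST_MAX.
 */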
426 static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
429 struct replicas_delta_list *d = trans->fs_usage_deltas;
430 unsigned new_size = d ? (d->size + more) * 2 : 128;
431 unsigned alloc_size = sizeof(*d) + new_size;
433 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
435 if (!d || d->used + more > d->size) {
436 d = krealloc(d, alloc_size, gfp|__GFP_ZERO);
439 if (alloc_size > REPLICAS_DELTA_LIST_MAX)
442 d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
446 memset(d, 0, REPLICAS_DELTA_LIST_MAX);
448 if (trans->fs_usage_deltas)
449 memcpy(d, trans->fs_usage_deltas,
450 trans->fs_usage_deltas->size + sizeof(*d));
452 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
453 kfree(trans->fs_usage_deltas);
457 trans->fs_usage_deltas = d;
463 static int replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
465 return allocate_dropping_locks_errcode(trans,
466 __replicas_deltas_realloc(trans, more, _gfp));
469 static inline int update_replicas_list(struct btree_trans *trans,
470 struct bch_replicas_entry *r,
473 struct replicas_delta_list *d;
474 struct replicas_delta *n;
481 b = replicas_entry_bytes(r) + 8;
482 ret = replicas_deltas_realloc(trans, b);
486 d = trans->fs_usage_deltas;
487 n = (void *) d->d + d->used;
489 memcpy((void *) n + offsetof(struct replicas_delta, r),
490 r, replicas_entry_bytes(r));
491 bch2_replicas_entry_sort(&n->r);
496 static inline int update_cached_sectors_list(struct btree_trans *trans,
497 unsigned dev, s64 sectors)
499 struct bch_replicas_padded r;
501 bch2_replicas_entry_cached(&r.e, dev);
503 return update_replicas_list(trans, &r.e, sectors);
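/*
 * Trigger for alloc keys: keeps bucket gens, gc bucket state and
 * device/filesystem usage in sync with the new alloc key, tracks buckets that
 * need a journal flush before they can be reused, and kicks off discards,
 * invalidates and gc_gens as needed.
 */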
506 int bch2_mark_alloc(struct btree_trans *trans,
507 enum btree_id btree, unsigned level,
508 struct bkey_s_c old, struct bkey_s_c new,
511 bool gc = flags & BTREE_TRIGGER_GC;
512 u64 journal_seq = trans->journal_res.seq;
513 u64 bucket_journal_seq;
514 struct bch_fs *c = trans->c;
515 struct bch_alloc_v4 old_a_convert, new_a_convert;
516 const struct bch_alloc_v4 *old_a, *new_a;
521 * alloc btree is read in by bch2_alloc_read, not gc:
523 if ((flags & BTREE_TRIGGER_GC) &&
524 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
527 if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
528 "alloc key for invalid device or bucket"))
531 ca = bch_dev_bkey_exists(c, new.k->p.inode);
533 old_a = bch2_alloc_to_v4(old, &old_a_convert);
534 new_a = bch2_alloc_to_v4(new, &new_a_convert);
536 bucket_journal_seq = new_a->journal_seq;
538 if ((flags & BTREE_TRIGGER_INSERT) &&
539 data_type_is_empty(old_a->data_type) !=
540 data_type_is_empty(new_a->data_type) &&
541 new.k->type == KEY_TYPE_alloc_v4) {
542 struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
544 EBUG_ON(!journal_seq);
547 * If the btree updates referring to a bucket weren't flushed
548 * before the bucket became empty again, then we don't have
549 * to wait on a journal flush before we can reuse the bucket:
551 v->journal_seq = bucket_journal_seq =
552 data_type_is_empty(new_a->data_type) &&
553 (journal_seq == v->journal_seq ||
554 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
558 if (!data_type_is_empty(old_a->data_type) &&
559 data_type_is_empty(new_a->data_type) &&
560 bucket_journal_seq) {
561 ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
562 c->journal.flushed_seq_ondisk,
563 new.k->p.inode, new.k->p.offset,
566 bch2_fs_fatal_error(c,
567 "error setting bucket_needs_journal_commit: %i", ret);
572 percpu_down_read(&c->mark_lock);
573 if (!gc && new_a->gen != old_a->gen)
574 *bucket_gen(ca, new.k->p.offset) = new_a->gen;
576 bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
579 struct bucket *g = gc_bucket(ca, new.k->p.offset);
585 g->data_type = new_a->data_type;
586 g->stripe = new_a->stripe;
587 g->stripe_redundancy = new_a->stripe_redundancy;
588 g->dirty_sectors = new_a->dirty_sectors;
589 g->cached_sectors = new_a->cached_sectors;
593 percpu_up_read(&c->mark_lock);
596 * need to know if we're getting called from the invalidate path or
600 if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
601 old_a->cached_sectors) {
602 ret = update_cached_sectors(c, new, ca->dev_idx,
603 -((s64) old_a->cached_sectors),
606 bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
612 if (new_a->data_type == BCH_DATA_free &&
613 (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
614 closure_wake_up(&c->freelist_wait);
616 if (new_a->data_type == BCH_DATA_need_discard &&
617 (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
620 if (old_a->data_type != BCH_DATA_cached &&
621 new_a->data_type == BCH_DATA_cached &&
622 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
623 bch2_do_invalidates(c);
625 if (new_a->data_type == BCH_DATA_need_gc_gens)
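/*
 * Used by gc to mark superblock and journal buckets, which aren't referenced
 * by the keys gc otherwise walks:
 */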
631 int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
632 size_t b, enum bch_data_type data_type,
633 unsigned sectors, struct gc_pos pos,
636 struct bucket old, new, *g;
639 BUG_ON(!(flags & BTREE_TRIGGER_GC));
640 BUG_ON(data_type != BCH_DATA_sb &&
641 data_type != BCH_DATA_journal);
644 * Backup superblock might be past the end of our normal usable space:
646 if (b >= ca->mi.nbuckets)
649 percpu_down_read(&c->mark_lock);
650 g = gc_bucket(ca, b);
655 if (bch2_fs_inconsistent_on(g->data_type &&
656 g->data_type != data_type, c,
657 "different types of data in same bucket: %s, %s",
658 bch2_data_types[g->data_type],
659 bch2_data_types[data_type])) {
664 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
665 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
666 ca->dev_idx, b, g->gen,
667 bch2_data_types[g->data_type ?: data_type],
668 g->dirty_sectors, sectors)) {
674 g->data_type = data_type;
675 g->dirty_sectors += sectors;
680 bch2_dev_usage_update_m(c, ca, old, new, 0, true);
681 percpu_up_read(&c->mark_lock);
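/*
 * Validate a pointer against the bucket it points into: checks generation,
 * data type and sector counts, emitting fsck/inconsistency errors rather than
 * updating anything.
 */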
685 static int check_bucket_ref(struct btree_trans *trans,
687 const struct bch_extent_ptr *ptr,
688 s64 sectors, enum bch_data_type ptr_data_type,
689 u8 b_gen, u8 bucket_data_type,
690 u32 dirty_sectors, u32 cached_sectors)
692 struct bch_fs *c = trans->c;
693 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
694 size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
695 u16 bucket_sectors = !ptr->cached
698 struct printbuf buf = PRINTBUF;
701 if (bucket_data_type == BCH_DATA_cached)
702 bucket_data_type = BCH_DATA_user;
704 if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
705 (bucket_data_type == BCH_DATA_user && ptr_data_type == BCH_DATA_stripe))
706 bucket_data_type = ptr_data_type = BCH_DATA_stripe;
708 if (gen_after(ptr->gen, b_gen)) {
709 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
710 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
712 ptr->dev, bucket_nr, b_gen,
713 bch2_data_types[bucket_data_type ?: ptr_data_type],
715 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
720 if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
721 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
722 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
724 ptr->dev, bucket_nr, b_gen,
725 bch2_data_types[bucket_data_type ?: ptr_data_type],
727 (printbuf_reset(&buf),
728 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
733 if (b_gen != ptr->gen && !ptr->cached) {
734 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
735 "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
737 ptr->dev, bucket_nr, b_gen,
738 *bucket_gen(ca, bucket_nr),
739 bch2_data_types[bucket_data_type ?: ptr_data_type],
741 (printbuf_reset(&buf),
742 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
747 if (b_gen != ptr->gen) {
752 if (!data_type_is_empty(bucket_data_type) &&
754 bucket_data_type != ptr_data_type) {
755 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
756 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
758 ptr->dev, bucket_nr, b_gen,
759 bch2_data_types[bucket_data_type],
760 bch2_data_types[ptr_data_type],
761 (printbuf_reset(&buf),
762 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
767 if ((unsigned) (bucket_sectors + sectors) > U32_MAX) {
768 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
769 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
771 ptr->dev, bucket_nr, b_gen,
772 bch2_data_types[bucket_data_type ?: ptr_data_type],
773 bucket_sectors, sectors,
774 (printbuf_reset(&buf),
775 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
783 bch2_dump_trans_updates(trans);
787 static int mark_stripe_bucket(struct btree_trans *trans,
792 struct bch_fs *c = trans->c;
793 u64 journal_seq = trans->journal_res.seq;
794 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
795 unsigned nr_data = s->nr_blocks - s->nr_redundant;
796 bool parity = ptr_idx >= nr_data;
797 enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
798 s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
799 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
800 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
801 struct bucket old, new, *g;
802 struct printbuf buf = PRINTBUF;
805 BUG_ON(!(flags & BTREE_TRIGGER_GC));
807 /* XXX doesn't handle deletion */
809 percpu_down_read(&c->mark_lock);
811 g = PTR_GC_BUCKET(ca, ptr);
813 if (g->dirty_sectors ||
814 (g->stripe && g->stripe != k.k->p.offset)) {
815 bch2_fs_inconsistent(c,
816 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
817 ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
818 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
826 ret = check_bucket_ref(trans, k, ptr, sectors, data_type,
827 g->gen, g->data_type,
828 g->dirty_sectors, g->cached_sectors);
832 g->data_type = data_type;
833 g->dirty_sectors += sectors;
835 g->stripe = k.k->p.offset;
836 g->stripe_redundancy = s->nr_redundant;
841 bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
842 percpu_up_read(&c->mark_lock);
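/*
 * Helper shared by the gc and transactional pointer triggers: validates the
 * pointer with check_bucket_ref(), then applies the sector delta to the
 * bucket's dirty or cached sectors and recomputes its data type.
 */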
847 static int __mark_pointer(struct btree_trans *trans,
849 const struct bch_extent_ptr *ptr,
850 s64 sectors, enum bch_data_type ptr_data_type,
851 u8 bucket_gen, u8 *bucket_data_type,
852 u32 *dirty_sectors, u32 *cached_sectors)
854 u32 *dst_sectors = !ptr->cached
857 int ret = check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
858 bucket_gen, *bucket_data_type,
859 *dirty_sectors, *cached_sectors);
864 *dst_sectors += sectors;
865 *bucket_data_type = *dirty_sectors || *cached_sectors
870 static int bch2_mark_pointer(struct btree_trans *trans,
871 enum btree_id btree_id, unsigned level,
873 struct extent_ptr_decoded p,
877 u64 journal_seq = trans->journal_res.seq;
878 struct bch_fs *c = trans->c;
879 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
880 struct bucket old, new, *g;
881 enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
885 BUG_ON(!(flags & BTREE_TRIGGER_GC));
887 percpu_down_read(&c->mark_lock);
888 g = PTR_GC_BUCKET(ca, &p.ptr);
892 bucket_data_type = g->data_type;
893 ret = __mark_pointer(trans, k, &p.ptr, sectors,
899 g->data_type = bucket_data_type;
904 bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
905 percpu_up_read(&c->mark_lock);
910 static int bch2_mark_stripe_ptr(struct btree_trans *trans,
912 struct bch_extent_stripe_ptr p,
913 enum bch_data_type data_type,
917 struct bch_fs *c = trans->c;
918 struct bch_replicas_padded r;
921 BUG_ON(!(flags & BTREE_TRIGGER_GC));
923 m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
925 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
927 return -BCH_ERR_ENOMEM_mark_stripe_ptr;
930 mutex_lock(&c->ec_stripes_heap_lock);
932 if (!m || !m->alive) {
933 mutex_unlock(&c->ec_stripes_heap_lock);
934 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
936 bch2_inconsistent_error(c);
940 m->block_sectors[p.block] += sectors;
943 mutex_unlock(&c->ec_stripes_heap_lock);
945 r.e.data_type = data_type;
946 update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
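/*
 * GC trigger for extents and btree pointers: marks each pointer's bucket,
 * accounts cached sectors and stripe references, and updates the replicas
 * entry for the dirty (non-cached) portion of the key.
 */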
951 int bch2_mark_extent(struct btree_trans *trans,
952 enum btree_id btree_id, unsigned level,
953 struct bkey_s_c old, struct bkey_s_c new,
956 u64 journal_seq = trans->journal_res.seq;
957 struct bch_fs *c = trans->c;
958 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
959 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
960 const union bch_extent_entry *entry;
961 struct extent_ptr_decoded p;
962 struct bch_replicas_padded r;
963 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
966 s64 sectors = bkey_is_btree_ptr(k.k)
969 s64 dirty_sectors = 0;
973 BUG_ON(!(flags & BTREE_TRIGGER_GC));
975 r.e.data_type = data_type;
979 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
980 s64 disk_sectors = ptr_disk_sectors(sectors, p);
982 if (flags & BTREE_TRIGGER_OVERWRITE)
983 disk_sectors = -disk_sectors;
985 ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
993 ret = update_cached_sectors(c, k, p.ptr.dev,
994 disk_sectors, journal_seq, true);
996 bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
1001 } else if (!p.has_ec) {
1002 dirty_sectors += disk_sectors;
1003 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1005 ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
1006 disk_sectors, flags);
1011 * There may be other dirty pointers in this extent, but
1012 * if so they're not required for mounting if we have an
1013 * erasure coded pointer in this extent:
1015 r.e.nr_required = 0;
1020 ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
1022 struct printbuf buf = PRINTBUF;
1024 bch2_bkey_val_to_text(&buf, c, k);
1025 bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
1026 printbuf_exit(&buf);
1034 int bch2_mark_stripe(struct btree_trans *trans,
1035 enum btree_id btree_id, unsigned level,
1036 struct bkey_s_c old, struct bkey_s_c new,
1039 bool gc = flags & BTREE_TRIGGER_GC;
1040 u64 journal_seq = trans->journal_res.seq;
1041 struct bch_fs *c = trans->c;
1042 u64 idx = new.k->p.offset;
1043 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1044 ? bkey_s_c_to_stripe(old).v : NULL;
1045 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1046 ? bkey_s_c_to_stripe(new).v : NULL;
1050 BUG_ON(gc && old_s);
1053 struct stripe *m = genradix_ptr(&c->stripes, idx);
1056 struct printbuf buf1 = PRINTBUF;
1057 struct printbuf buf2 = PRINTBUF;
1059 bch2_bkey_val_to_text(&buf1, c, old);
1060 bch2_bkey_val_to_text(&buf2, c, new);
1061 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
1063 "new %s", idx, buf1.buf, buf2.buf);
1064 printbuf_exit(&buf2);
1065 printbuf_exit(&buf1);
1066 bch2_inconsistent_error(c);
1071 bch2_stripes_heap_del(c, m, idx);
1073 memset(m, 0, sizeof(*m));
1075 m->sectors = le16_to_cpu(new_s->sectors);
1076 m->algorithm = new_s->algorithm;
1077 m->nr_blocks = new_s->nr_blocks;
1078 m->nr_redundant = new_s->nr_redundant;
1079 m->blocks_nonempty = 0;
1081 for (i = 0; i < new_s->nr_blocks; i++)
1082 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
1085 bch2_stripes_heap_insert(c, m, idx);
1087 bch2_stripes_heap_update(c, m, idx);
1090 struct gc_stripe *m =
1091 genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
1094 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
1096 return -BCH_ERR_ENOMEM_mark_stripe;
1099 * This will be wrong when we bring back runtime gc: we should
1100 * be unmarking the old key and then marking the new key
1103 m->sectors = le16_to_cpu(new_s->sectors);
1104 m->nr_blocks = new_s->nr_blocks;
1105 m->nr_redundant = new_s->nr_redundant;
1107 for (i = 0; i < new_s->nr_blocks; i++)
1108 m->ptrs[i] = new_s->ptrs[i];
1110 bch2_bkey_to_replicas(&m->r.e, new);
1113 * gc recalculates this field from stripe ptr
1116 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1118 for (i = 0; i < new_s->nr_blocks; i++) {
1119 ret = mark_stripe_bucket(trans, new, i, flags);
1124 ret = update_replicas(c, new, &m->r.e,
1125 ((s64) m->sectors * m->nr_redundant),
1128 struct printbuf buf = PRINTBUF;
1130 bch2_bkey_val_to_text(&buf, c, new);
1131 bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
1132 printbuf_exit(&buf);
1140 int bch2_mark_inode(struct btree_trans *trans,
1141 enum btree_id btree_id, unsigned level,
1142 struct bkey_s_c old, struct bkey_s_c new,
1145 struct bch_fs *c = trans->c;
1146 struct bch_fs_usage *fs_usage;
1147 u64 journal_seq = trans->journal_res.seq;
1149 if (flags & BTREE_TRIGGER_INSERT) {
1150 struct bch_inode_v3 *v = (struct bch_inode_v3 *) new.v;
1152 BUG_ON(!journal_seq);
1153 BUG_ON(new.k->type != KEY_TYPE_inode_v3);
1155 v->bi_journal_seq = cpu_to_le64(journal_seq);
1158 if (flags & BTREE_TRIGGER_GC) {
1159 percpu_down_read(&c->mark_lock);
1162 fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
1163 fs_usage->nr_inodes += bkey_is_inode(new.k);
1164 fs_usage->nr_inodes -= bkey_is_inode(old.k);
1167 percpu_up_read(&c->mark_lock);
1172 int bch2_mark_reservation(struct btree_trans *trans,
1173 enum btree_id btree_id, unsigned level,
1174 struct bkey_s_c old, struct bkey_s_c new,
1177 struct bch_fs *c = trans->c;
1178 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
1179 struct bch_fs_usage *fs_usage;
1180 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1181 s64 sectors = (s64) k.k->size;
1183 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1185 if (flags & BTREE_TRIGGER_OVERWRITE)
1187 sectors *= replicas;
1189 percpu_down_read(&c->mark_lock);
1192 fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
1193 replicas = clamp_t(unsigned, replicas, 1,
1194 ARRAY_SIZE(fs_usage->persistent_reserved));
1196 fs_usage->reserved += sectors;
1197 fs_usage->persistent_reserved[replicas - 1] += sectors;
1200 percpu_up_read(&c->mark_lock);
1205 static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
1206 struct bkey_s_c_reflink_p p,
1208 u64 *idx, unsigned flags, size_t r_idx)
1210 struct bch_fs *c = trans->c;
1211 struct reflink_gc *r;
1212 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1215 struct printbuf buf = PRINTBUF;
1217 if (r_idx >= c->reflink_gc_nr)
1220 r = genradix_ptr(&c->reflink_gc_table, r_idx);
1221 next_idx = min(next_idx, r->offset - r->size);
1222 if (*idx < next_idx)
1225 BUG_ON((s64) r->refcount + add < 0);
1231 if (fsck_err(c, "pointer to missing indirect extent\n"
1233 " missing range %llu-%llu",
1234 (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
1236 struct bkey_i_error *new;
1238 new = bch2_trans_kmalloc(trans, sizeof(*new));
1239 ret = PTR_ERR_OR_ZERO(new);
1244 new->k.type = KEY_TYPE_error;
1245 new->k.p = bkey_start_pos(p.k);
1246 new->k.p.offset += *idx - start;
1247 bch2_key_resize(&new->k, next_idx - *idx);
1248 ret = __bch2_btree_insert(trans, BTREE_ID_extents, &new->k_i,
1249 BTREE_TRIGGER_NORUN);
1255 printbuf_exit(&buf);
1259 int bch2_mark_reflink_p(struct btree_trans *trans,
1260 enum btree_id btree_id, unsigned level,
1261 struct bkey_s_c old, struct bkey_s_c new,
1264 struct bch_fs *c = trans->c;
1265 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
1266 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1267 struct reflink_gc *ref;
1269 u64 idx = le64_to_cpu(p.v->idx), start = idx;
1270 u64 end = le64_to_cpu(p.v->idx) + p.k->size;
1273 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1275 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
1276 idx -= le32_to_cpu(p.v->front_pad);
1277 end += le32_to_cpu(p.v->back_pad);
1281 r = c->reflink_gc_nr;
1283 m = l + (r - l) / 2;
1285 ref = genradix_ptr(&c->reflink_gc_table, m);
1286 if (ref->offset <= idx)
1292 while (idx < end && !ret)
1293 ret = __bch2_mark_reflink_p(trans, p, start, end,
1299 void bch2_trans_fs_usage_revert(struct btree_trans *trans,
1300 struct replicas_delta_list *deltas)
1302 struct bch_fs *c = trans->c;
1303 struct bch_fs_usage *dst;
1304 struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
1308 percpu_down_read(&c->mark_lock);
1310 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1312 /* revert changes: */
1313 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1314 switch (d->r.data_type) {
1315 case BCH_DATA_btree:
1317 case BCH_DATA_parity:
1320 BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
1323 dst->nr_inodes -= deltas->nr_inodes;
1325 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1326 added -= deltas->persistent_reserved[i];
1327 dst->reserved -= deltas->persistent_reserved[i];
1328 dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
1332 trans->disk_res->sectors += added;
1333 this_cpu_add(*c->online_reserved, added);
1337 percpu_up_read(&c->mark_lock);
1340 int bch2_trans_fs_usage_apply(struct btree_trans *trans,
1341 struct replicas_delta_list *deltas)
1343 struct bch_fs *c = trans->c;
1344 static int warned_disk_usage = 0;
1346 unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1347 struct replicas_delta *d = deltas->d, *d2;
1348 struct replicas_delta *top = (void *) deltas->d + deltas->used;
1349 struct bch_fs_usage *dst;
1350 s64 added = 0, should_not_have_added;
1353 percpu_down_read(&c->mark_lock);
1355 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1357 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1358 switch (d->r.data_type) {
1359 case BCH_DATA_btree:
1361 case BCH_DATA_parity:
1365 if (__update_replicas(c, dst, &d->r, d->delta))
1369 dst->nr_inodes += deltas->nr_inodes;
1371 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1372 added += deltas->persistent_reserved[i];
1373 dst->reserved += deltas->persistent_reserved[i];
1374 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
1378 * Not allowed to reduce sectors_available except by getting a
1381 should_not_have_added = added - (s64) disk_res_sectors;
1382 if (unlikely(should_not_have_added > 0)) {
1383 u64 old, new, v = atomic64_read(&c->sectors_available);
1387 new = max_t(s64, 0, old - should_not_have_added);
1388 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1391 added -= should_not_have_added;
1396 trans->disk_res->sectors -= added;
1397 this_cpu_sub(*c->online_reserved, added);
1401 percpu_up_read(&c->mark_lock);
1403 if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1404 bch2_trans_inconsistent(trans,
1405 "disk usage increased %lli more than %u sectors reserved)",
1406 should_not_have_added, disk_res_sectors);
1409 /* revert changes: */
1410 for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
1411 BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
1414 percpu_up_read(&c->mark_lock);
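/*
 * Transactional triggers: run as part of btree transaction commit, updating
 * the alloc btree and the transaction's replicas delta list rather than the
 * in-memory gc state.
 */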
1420 static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
1421 enum btree_id btree_id, unsigned level,
1422 struct bkey_s_c k, struct extent_ptr_decoded p,
1425 bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
1426 struct btree_iter iter;
1427 struct bkey_i_alloc_v4 *a;
1429 struct bch_backpointer bp;
1433 bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
1434 sectors = bp.bucket_len;
1438 a = bch2_trans_start_alloc_update(trans, &iter, bucket);
1442 ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
1443 a->v.gen, &a->v.data_type,
1444 &a->v.dirty_sectors, &a->v.cached_sectors) ?:
1445 bch2_trans_update(trans, &iter, &a->k_i, 0);
1446 bch2_trans_iter_exit(trans, &iter);
1451 if (!p.ptr.cached) {
1452 ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
1460 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1461 struct extent_ptr_decoded p,
1462 s64 sectors, enum bch_data_type data_type)
1464 struct btree_iter iter;
1465 struct bkey_i_stripe *s;
1466 struct bch_replicas_padded r;
1469 s = bch2_bkey_get_mut_typed(trans, &iter,
1470 BTREE_ID_stripes, POS(0, p.ec.idx),
1471 BTREE_ITER_WITH_UPDATES, stripe);
1472 ret = PTR_ERR_OR_ZERO(s);
1473 if (unlikely(ret)) {
1474 bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
1475 "pointer to nonexistent stripe %llu",
1480 if (!bch2_ptr_matches_stripe(&s->v, p)) {
1481 bch2_trans_inconsistent(trans,
1482 "stripe pointer doesn't match stripe %llu",
1488 stripe_blockcount_set(&s->v, p.ec.block,
1489 stripe_blockcount_get(&s->v, p.ec.block) +
1492 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1493 r.e.data_type = data_type;
1494 ret = update_replicas_list(trans, &r.e, sectors);
1496 bch2_trans_iter_exit(trans, &iter);
1500 int bch2_trans_mark_extent(struct btree_trans *trans,
1501 enum btree_id btree_id, unsigned level,
1502 struct bkey_s_c old, struct bkey_i *new,
1505 struct bch_fs *c = trans->c;
1506 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
1508 : bkey_i_to_s_c(new);
1509 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1510 const union bch_extent_entry *entry;
1511 struct extent_ptr_decoded p;
1512 struct bch_replicas_padded r;
1513 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
1516 s64 sectors = bkey_is_btree_ptr(k.k)
1519 s64 dirty_sectors = 0;
1523 r.e.data_type = data_type;
1525 r.e.nr_required = 1;
1527 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1528 s64 disk_sectors = ptr_disk_sectors(sectors, p);
1530 if (flags & BTREE_TRIGGER_OVERWRITE)
1531 disk_sectors = -disk_sectors;
1533 ret = bch2_trans_mark_pointer(trans, btree_id, level, k, p, flags);
1541 ret = update_cached_sectors_list(trans, p.ptr.dev,
1546 } else if (!p.has_ec) {
1547 dirty_sectors += disk_sectors;
1548 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1550 ret = bch2_trans_mark_stripe_ptr(trans, p,
1551 disk_sectors, data_type);
1555 r.e.nr_required = 0;
1560 ret = update_replicas_list(trans, &r.e, dirty_sectors);
1565 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
1566 struct bkey_s_c_stripe s,
1567 unsigned idx, bool deleting)
1569 struct bch_fs *c = trans->c;
1570 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1571 struct btree_iter iter;
1572 struct bkey_i_alloc_v4 *a;
1573 enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
1574 ? BCH_DATA_parity : 0;
1575 s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
1581 a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
1585 ret = check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
1586 a->v.gen, a->v.data_type,
1587 a->v.dirty_sectors, a->v.cached_sectors);
1592 if (bch2_trans_inconsistent_on(a->v.stripe ||
1593 a->v.stripe_redundancy, trans,
1594 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
1595 iter.pos.inode, iter.pos.offset, a->v.gen,
1596 bch2_data_types[a->v.data_type],
1598 a->v.stripe, s.k->p.offset)) {
1603 if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
1604 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
1605 iter.pos.inode, iter.pos.offset, a->v.gen,
1606 bch2_data_types[a->v.data_type],
1613 a->v.stripe = s.k->p.offset;
1614 a->v.stripe_redundancy = s.v->nr_redundant;
1615 a->v.data_type = BCH_DATA_stripe;
1617 if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
1618 a->v.stripe_redundancy != s.v->nr_redundant, trans,
1619 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
1620 iter.pos.inode, iter.pos.offset, a->v.gen,
1621 s.k->p.offset, a->v.stripe)) {
1627 a->v.stripe_redundancy = 0;
1628 a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
1631 a->v.dirty_sectors += sectors;
1633 a->v.data_type = !deleting ? data_type : 0;
1635 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1639 bch2_trans_iter_exit(trans, &iter);
1643 int bch2_trans_mark_stripe(struct btree_trans *trans,
1644 enum btree_id btree_id, unsigned level,
1645 struct bkey_s_c old, struct bkey_i *new,
1648 const struct bch_stripe *old_s = NULL;
1649 struct bch_stripe *new_s = NULL;
1650 struct bch_replicas_padded r;
1651 unsigned i, nr_blocks;
1654 if (old.k->type == KEY_TYPE_stripe)
1655 old_s = bkey_s_c_to_stripe(old).v;
1656 if (new->k.type == KEY_TYPE_stripe)
1657 new_s = &bkey_i_to_stripe(new)->v;
1660 * If the pointers aren't changing, we don't need to do anything:
1662 if (new_s && old_s &&
1663 new_s->nr_blocks == old_s->nr_blocks &&
1664 new_s->nr_redundant == old_s->nr_redundant &&
1665 !memcmp(old_s->ptrs, new_s->ptrs,
1666 new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
1669 BUG_ON(new_s && old_s &&
1670 (new_s->nr_blocks != old_s->nr_blocks ||
1671 new_s->nr_redundant != old_s->nr_redundant));
1673 nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
1676 s64 sectors = le16_to_cpu(new_s->sectors);
1678 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
1679 ret = update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
1685 s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
1687 bch2_bkey_to_replicas(&r.e, old);
1688 ret = update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
1693 for (i = 0; i < nr_blocks; i++) {
1694 if (new_s && old_s &&
1695 !memcmp(&new_s->ptrs[i],
1697 sizeof(new_s->ptrs[i])))
1701 ret = bch2_trans_mark_stripe_bucket(trans,
1702 bkey_i_to_s_c_stripe(new), i, false);
1708 ret = bch2_trans_mark_stripe_bucket(trans,
1709 bkey_s_c_to_stripe(old), i, true);
1718 int bch2_trans_mark_inode(struct btree_trans *trans,
1719 enum btree_id btree_id, unsigned level,
1720 struct bkey_s_c old,
1724 int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
1727 int ret = replicas_deltas_realloc(trans, 0);
1728 struct replicas_delta_list *d = trans->fs_usage_deltas;
1739 int bch2_trans_mark_reservation(struct btree_trans *trans,
1740 enum btree_id btree_id, unsigned level,
1741 struct bkey_s_c old,
1745 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
1747 : bkey_i_to_s_c(new);
1748 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1749 s64 sectors = (s64) k.k->size;
1750 struct replicas_delta_list *d;
1753 if (flags & BTREE_TRIGGER_OVERWRITE)
1755 sectors *= replicas;
1757 ret = replicas_deltas_realloc(trans, 0);
1761 d = trans->fs_usage_deltas;
1762 replicas = clamp_t(unsigned, replicas, 1,
1763 ARRAY_SIZE(d->persistent_reserved));
1765 d->persistent_reserved[replicas - 1] += sectors;
1769 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1770 struct bkey_s_c_reflink_p p,
1771 u64 *idx, unsigned flags)
1773 struct bch_fs *c = trans->c;
1774 struct btree_iter iter;
1777 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1778 struct printbuf buf = PRINTBUF;
1781 k = bch2_bkey_get_mut_noupdate(trans, &iter,
1782 BTREE_ID_reflink, POS(0, *idx),
1783 BTREE_ITER_WITH_UPDATES);
1784 ret = PTR_ERR_OR_ZERO(k);
1788 refcount = bkey_refcount(k);
1790 bch2_bkey_val_to_text(&buf, c, p.s_c);
1791 bch2_trans_inconsistent(trans,
1792 "nonexistent indirect extent at %llu while marking\n %s",
1798 if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
1799 bch2_bkey_val_to_text(&buf, c, p.s_c);
1800 bch2_trans_inconsistent(trans,
1801 "indirect extent refcount underflow at %llu while marking\n %s",
1807 if (flags & BTREE_TRIGGER_INSERT) {
1808 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1811 pad = max_t(s64, le32_to_cpu(v->front_pad),
1812 le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
1813 BUG_ON(pad > U32_MAX);
1814 v->front_pad = cpu_to_le32(pad);
1816 pad = max_t(s64, le32_to_cpu(v->back_pad),
1817 k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
1818 BUG_ON(pad > U32_MAX);
1819 v->back_pad = cpu_to_le32(pad);
1822 le64_add_cpu(refcount, add);
1824 bch2_btree_iter_set_pos_to_extent_start(&iter);
1825 ret = bch2_trans_update(trans, &iter, k, 0);
1829 *idx = k->k.p.offset;
1831 bch2_trans_iter_exit(trans, &iter);
1832 printbuf_exit(&buf);
1836 int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1837 enum btree_id btree_id, unsigned level,
1838 struct bkey_s_c old,
1842 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
1844 : bkey_i_to_s_c(new);
1845 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1849 if (flags & BTREE_TRIGGER_INSERT) {
1850 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1852 v->front_pad = v->back_pad = 0;
1855 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
1856 end_idx = le64_to_cpu(p.v->idx) + p.k->size +
1857 le32_to_cpu(p.v->back_pad);
1859 while (idx < end_idx && !ret)
1860 ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
1865 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1866 struct bch_dev *ca, size_t b,
1867 enum bch_data_type type,
1870 struct bch_fs *c = trans->c;
1871 struct btree_iter iter;
1872 struct bkey_i_alloc_v4 *a;
1876 * Backup superblock might be past the end of our normal usable space:
1878 if (b >= ca->mi.nbuckets)
1881 a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
1885 if (a->v.data_type && type && a->v.data_type != type) {
1886 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1887 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
1889 iter.pos.inode, iter.pos.offset, a->v.gen,
1890 bch2_data_types[a->v.data_type],
1891 bch2_data_types[type],
1892 bch2_data_types[type]);
1897 a->v.data_type = type;
1898 a->v.dirty_sectors = sectors;
1900 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1904 bch2_trans_iter_exit(trans, &iter);
1908 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1909 struct bch_dev *ca, size_t b,
1910 enum bch_data_type type,
1913 return commit_do(trans, NULL, NULL, 0,
1914 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1917 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
1920 enum bch_data_type type,
1921 u64 *bucket, unsigned *bucket_sectors)
1924 u64 b = sector_to_bucket(ca, start);
1926 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1928 if (b != *bucket && *bucket_sectors) {
1929 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
1930 type, *bucket_sectors);
1934 *bucket_sectors = 0;
1938 *bucket_sectors += sectors;
1940 } while (start < end);
1945 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
1948 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1950 unsigned i, bucket_sectors = 0;
1953 for (i = 0; i < layout->nr_superblocks; i++) {
1954 u64 offset = le64_to_cpu(layout->sb_offset[i]);
1956 if (offset == BCH_SB_SECTOR) {
1957 ret = bch2_trans_mark_metadata_sectors(trans, ca,
1959 BCH_DATA_sb, &bucket, &bucket_sectors);
1964 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1965 offset + (1 << layout->sb_max_size_bits),
1966 BCH_DATA_sb, &bucket, &bucket_sectors);
1971 if (bucket_sectors) {
1972 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1973 bucket, BCH_DATA_sb, bucket_sectors);
1978 for (i = 0; i < ca->journal.nr; i++) {
1979 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1980 ca->journal.buckets[i],
1981 BCH_DATA_journal, ca->mi.bucket_size);
1989 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
1991 int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
1997 /* Disk reservations: */
1999 #define SECTORS_CACHE 1024
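/*
 * Fast path: satisfy the reservation from this CPU's cache of preallocated
 * sectors, refilling it from c->sectors_available as needed; slow path:
 * recompute what's actually available under sectors_available_lock.
 */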
2001 int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2002 u64 sectors, int flags)
2004 struct bch_fs_pcpu *pcpu;
2006 s64 sectors_available;
2009 percpu_down_read(&c->mark_lock);
2011 pcpu = this_cpu_ptr(c->pcpu);
2013 if (sectors <= pcpu->sectors_available)
2016 v = atomic64_read(&c->sectors_available);
2019 get = min((u64) sectors + SECTORS_CACHE, old);
2021 if (get < sectors) {
2025 } while ((v = atomic64_cmpxchg(&c->sectors_available,
2026 old, old - get)) != old);
2028 pcpu->sectors_available += get;
2031 pcpu->sectors_available -= sectors;
2032 this_cpu_add(*c->online_reserved, sectors);
2033 res->sectors += sectors;
2036 percpu_up_read(&c->mark_lock);
2040 mutex_lock(&c->sectors_available_lock);
2042 percpu_u64_set(&c->pcpu->sectors_available, 0);
2043 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2045 if (sectors <= sectors_available ||
2046 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2047 atomic64_set(&c->sectors_available,
2048 max_t(s64, 0, sectors_available - sectors));
2049 this_cpu_add(*c->online_reserved, sectors);
2050 res->sectors += sectors;
2053 atomic64_set(&c->sectors_available, sectors_available);
2054 ret = -BCH_ERR_ENOSPC_disk_reservation;
2057 mutex_unlock(&c->sectors_available_lock);
2058 percpu_up_read(&c->mark_lock);
2063 /* Startup/shutdown: */
2065 static void bucket_gens_free_rcu(struct rcu_head *rcu)
2067 struct bucket_gens *buckets =
2068 container_of(rcu, struct bucket_gens, rcu);
2070 kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
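/*
 * Resize a device's bucket_gens array (and buckets_nouse bitmap, if enabled)
 * for a new bucket count, copying over the old contents and freeing the old
 * array via RCU.
 */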
2073 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2075 struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
2076 unsigned long *buckets_nouse = NULL;
2077 bool resize = ca->bucket_gens != NULL;
2080 if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
2081 GFP_KERNEL|__GFP_ZERO))) {
2082 ret = -BCH_ERR_ENOMEM_bucket_gens;
2086 if ((c->opts.buckets_nouse &&
2087 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2088 sizeof(unsigned long),
2089 GFP_KERNEL|__GFP_ZERO)))) {
2090 ret = -BCH_ERR_ENOMEM_buckets_nouse;
2094 bucket_gens->first_bucket = ca->mi.first_bucket;
2095 bucket_gens->nbuckets = nbuckets;
2097 bch2_copygc_stop(c);
2100 down_write(&c->gc_lock);
2101 down_write(&ca->bucket_lock);
2102 percpu_down_write(&c->mark_lock);
2105 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
2108 size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
2110 memcpy(bucket_gens->b,
2114 memcpy(buckets_nouse,
2116 BITS_TO_LONGS(n) * sizeof(unsigned long));
2119 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
2120 bucket_gens = old_bucket_gens;
2122 swap(ca->buckets_nouse, buckets_nouse);
2124 nbuckets = ca->mi.nbuckets;
2127 percpu_up_write(&c->mark_lock);
2128 up_write(&ca->bucket_lock);
2129 up_write(&c->gc_lock);
2134 kvpfree(buckets_nouse,
2135 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2137 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
2142 void bch2_dev_buckets_free(struct bch_dev *ca)
2146 kvpfree(ca->buckets_nouse,
2147 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2148 kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
2149 sizeof(struct bucket_gens) + ca->mi.nbuckets);
2151 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2152 free_percpu(ca->usage[i]);
2153 kfree(ca->usage_base);
2156 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2160 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2161 if (!ca->usage_base)
2162 return -BCH_ERR_ENOMEM_usage_init;
2164 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2165 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2167 return -BCH_ERR_ENOMEM_usage_init;
2170 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);