1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
9 #include "alloc_background.h"
10 #include "backpointers.h"
13 #include "btree_update.h"
15 #include "buckets_waiting_for_journal.h"
23 #include "subvolume.h"
25 #include <linux/preempt.h>
26 #include <trace/events/bcachefs.h>
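/*
 * Fold a per-data-type sector delta into the summary counters in
 * struct bch_fs_usage: btree, user data and cached data are each
 * tracked as separate totals.
 */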
28 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
29 enum bch_data_type data_type,
34 fs_usage->btree += sectors;
38 fs_usage->data += sectors;
41 fs_usage->cached += sectors;
48 void bch2_fs_usage_initialize(struct bch_fs *c)
50 struct bch_fs_usage *usage;
54 percpu_down_write(&c->mark_lock);
55 usage = c->usage_base;
57 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
58 bch2_fs_usage_acc_to_base(c, i);
60 for (i = 0; i < BCH_REPLICAS_MAX; i++)
61 usage->reserved += usage->persistent_reserved[i];
63 for (i = 0; i < c->replicas.nr; i++) {
64 struct bch_replicas_entry *e =
65 cpu_replicas_entry(&c->replicas, i);
67 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
70 for_each_member_device(ca, c, i) {
71 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
73 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
74 dev.d[BCH_DATA_journal].buckets) *
78 percpu_up_write(&c->mark_lock);
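/*
 * Usage is accumulated in percpu counters: one set per journal buffer,
 * selected by journal_seq & JOURNAL_BUF_MASK, plus a separate set used by
 * gc. dev_usage_ptr()/fs_usage_ptr() return the set an update belongs to.
 */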
81 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
85 BUG_ON(!gc && !journal_seq);
87 return this_cpu_ptr(gc
89 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
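/*
 * Readers sum usage_base plus the percpu counters under a read_seqcount
 * retry loop on c->usage_lock; bch2_fs_usage_acc_to_base() folds the percpu
 * counters back into usage_base under write_seqcount_begin()/end().
 */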
92 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
94 struct bch_fs *c = ca->fs;
95 struct bch_dev_usage ret;
96 unsigned seq, i, u64s = dev_usage_u64s();
99 seq = read_seqcount_begin(&c->usage_lock);
100 memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
101 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
102 acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
103 } while (read_seqcount_retry(&c->usage_lock, seq));
108 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
109 unsigned journal_seq,
112 percpu_rwsem_assert_held(&c->mark_lock);
113 BUG_ON(!gc && !journal_seq);
115 return this_cpu_ptr(gc
117 : c->usage[journal_seq & JOURNAL_BUF_MASK]);
120 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
122 ssize_t offset = v - (u64 *) c->usage_base;
126 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
127 percpu_rwsem_assert_held(&c->mark_lock);
130 seq = read_seqcount_begin(&c->usage_lock);
133 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
134 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
135 } while (read_seqcount_retry(&c->usage_lock, seq));
140 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
142 struct bch_fs_usage_online *ret;
143 unsigned seq, i, u64s;
145 percpu_down_read(&c->mark_lock);
147 ret = kmalloc(sizeof(struct bch_fs_usage_online) +
148 sizeof(u64) * c->replicas.nr, GFP_NOFS);
149 if (unlikely(!ret)) {
150 percpu_up_read(&c->mark_lock);
154 ret->online_reserved = percpu_u64_get(c->online_reserved);
156 u64s = fs_usage_u64s(c);
158 seq = read_seqcount_begin(&c->usage_lock);
159 memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
160 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
161 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
162 } while (read_seqcount_retry(&c->usage_lock, seq));
167 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
170 unsigned i, u64s = fs_usage_u64s(c);
172 BUG_ON(idx >= ARRAY_SIZE(c->usage));
175 write_seqcount_begin(&c->usage_lock);
177 acc_u64s_percpu((u64 *) c->usage_base,
178 (u64 __percpu *) c->usage[idx], u64s);
179 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
182 for_each_member_device_rcu(ca, c, i, NULL) {
183 u64s = dev_usage_u64s();
185 acc_u64s_percpu((u64 *) ca->usage_base,
186 (u64 __percpu *) ca->usage[idx], u64s);
187 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
191 write_seqcount_end(&c->usage_lock);
195 void bch2_fs_usage_to_text(struct printbuf *out,
197 struct bch_fs_usage_online *fs_usage)
201 prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
203 prt_printf(out, "hidden:\t\t\t\t%llu\n",
205 prt_printf(out, "data:\t\t\t\t%llu\n",
207 prt_printf(out, "cached:\t\t\t\t%llu\n",
209 prt_printf(out, "reserved:\t\t\t%llu\n",
210 fs_usage->u.reserved);
211 prt_printf(out, "nr_inodes:\t\t\t%llu\n",
212 fs_usage->u.nr_inodes);
213 prt_printf(out, "online reserved:\t\t%llu\n",
214 fs_usage->online_reserved);
217 i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
219 prt_printf(out, "%u replicas:\n", i + 1);
220 prt_printf(out, "\treserved:\t\t%llu\n",
221 fs_usage->u.persistent_reserved[i]);
224 for (i = 0; i < c->replicas.nr; i++) {
225 struct bch_replicas_entry *e =
226 cpu_replicas_entry(&c->replicas, i);
228 prt_printf(out, "\t");
229 bch2_replicas_entry_to_text(out, e);
230 prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
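/*
 * reserve_factor() inflates a reservation by roughly 1/2^RESERVE_FACTOR so
 * that reserved space is over- rather than under-counted. For example, if
 * RESERVE_FACTOR were 6, reserve_factor(1000) = 1000 + (1024 >> 6) = 1016.
 */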
234 static u64 reserve_factor(u64 r)
236 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
239 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
241 return min(fs_usage->u.hidden +
244 reserve_factor(fs_usage->u.reserved +
245 fs_usage->online_reserved),
249 static struct bch_fs_usage_short
250 __bch2_fs_usage_read_short(struct bch_fs *c)
252 struct bch_fs_usage_short ret;
255 ret.capacity = c->capacity -
256 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
258 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
259 bch2_fs_usage_read_one(c, &c->usage_base->btree);
260 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
261 percpu_u64_get(c->online_reserved);
263 ret.used = min(ret.capacity, data + reserve_factor(reserved));
264 ret.free = ret.capacity - ret.used;
266 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
271 struct bch_fs_usage_short
272 bch2_fs_usage_read_short(struct bch_fs *c)
274 struct bch_fs_usage_short ret;
276 percpu_down_read(&c->mark_lock);
277 ret = __bch2_fs_usage_read_short(c);
278 percpu_up_read(&c->mark_lock);
283 void bch2_dev_usage_init(struct bch_dev *ca)
285 ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
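/*
 * A bucket with dirty data that isn't full contributes its unused remainder
 * (bucket_size - dirty_sectors) to the per-data-type fragmented counters
 * updated in bch2_dev_usage_update().
 */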
288 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
289 struct bch_alloc_v4 a)
291 return a.dirty_sectors
292 ? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
296 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
297 struct bch_alloc_v4 old,
298 struct bch_alloc_v4 new,
299 u64 journal_seq, bool gc)
301 struct bch_fs_usage *fs_usage;
302 struct bch_dev_usage *u;
305 fs_usage = fs_usage_ptr(c, journal_seq, gc);
307 if (data_type_is_hidden(old.data_type))
308 fs_usage->hidden -= ca->mi.bucket_size;
309 if (data_type_is_hidden(new.data_type))
310 fs_usage->hidden += ca->mi.bucket_size;
312 u = dev_usage_ptr(ca, journal_seq, gc);
314 u->d[old.data_type].buckets--;
315 u->d[new.data_type].buckets++;
317 u->buckets_ec -= (int) !!old.stripe;
318 u->buckets_ec += (int) !!new.stripe;
320 u->d[old.data_type].sectors -= old.dirty_sectors;
321 u->d[new.data_type].sectors += new.dirty_sectors;
323 u->d[BCH_DATA_cached].sectors += new.cached_sectors;
324 u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
326 u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
327 u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
332 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
333 struct bucket old, struct bucket new,
334 u64 journal_seq, bool gc)
336 struct bch_alloc_v4 old_a = {
338 .data_type = old.data_type,
339 .dirty_sectors = old.dirty_sectors,
340 .cached_sectors = old.cached_sectors,
341 .stripe = old.stripe,
343 struct bch_alloc_v4 new_a = {
345 .data_type = new.data_type,
346 .dirty_sectors = new.dirty_sectors,
347 .cached_sectors = new.cached_sectors,
348 .stripe = new.stripe,
351 bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
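/*
 * Every distinct replicas entry (data type plus the set of devices the data
 * lives on) gets its own slot in fs_usage->replicas[];
 * bch2_replicas_entry_idx() maps an entry to its slot, and the same delta
 * is also folded into the summary counters.
 */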
354 static inline int __update_replicas(struct bch_fs *c,
355 struct bch_fs_usage *fs_usage,
356 struct bch_replicas_entry *r,
359 int idx = bch2_replicas_entry_idx(c, r);
364 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
365 fs_usage->replicas[idx] += sectors;
369 static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
370 struct bch_replicas_entry *r, s64 sectors,
371 unsigned journal_seq, bool gc)
373 struct bch_fs_usage *fs_usage;
375 struct printbuf buf = PRINTBUF;
377 percpu_down_read(&c->mark_lock);
380 idx = bch2_replicas_entry_idx(c, r);
382 fsck_err(c, "no replicas entry\n"
384 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
385 percpu_up_read(&c->mark_lock);
386 ret = bch2_mark_replicas(c, r);
387 percpu_down_read(&c->mark_lock);
391 idx = bch2_replicas_entry_idx(c, r);
399 fs_usage = fs_usage_ptr(c, journal_seq, gc);
400 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
401 fs_usage->replicas[idx] += sectors;
405 percpu_up_read(&c->mark_lock);
410 static inline int update_cached_sectors(struct bch_fs *c,
412 unsigned dev, s64 sectors,
413 unsigned journal_seq, bool gc)
415 struct bch_replicas_padded r;
417 bch2_replicas_entry_cached(&r.e, dev);
419 return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
422 static struct replicas_delta_list *
423 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
425 struct replicas_delta_list *d = trans->fs_usage_deltas;
426 unsigned new_size = d ? (d->size + more) * 2 : 128;
427 unsigned alloc_size = sizeof(*d) + new_size;
429 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
431 if (!d || d->used + more > d->size) {
432 d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
434 BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
437 d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
438 memset(d, 0, REPLICAS_DELTA_LIST_MAX);
440 if (trans->fs_usage_deltas)
441 memcpy(d, trans->fs_usage_deltas,
442 trans->fs_usage_deltas->size + sizeof(*d));
444 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
445 kfree(trans->fs_usage_deltas);
449 trans->fs_usage_deltas = d;
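/*
 * Transactional triggers don't modify the counters directly: they append
 * replicas_delta entries to trans->fs_usage_deltas, which
 * bch2_trans_fs_usage_apply() applies when the transaction commits.
 */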
454 static inline void update_replicas_list(struct btree_trans *trans,
455 struct bch_replicas_entry *r,
458 struct replicas_delta_list *d;
459 struct replicas_delta *n;
465 b = replicas_entry_bytes(r) + 8;
466 d = replicas_deltas_realloc(trans, b);
468 n = (void *) d->d + d->used;
470 memcpy((void *) n + offsetof(struct replicas_delta, r),
471 r, replicas_entry_bytes(r));
472 bch2_replicas_entry_sort(&n->r);
476 static inline void update_cached_sectors_list(struct btree_trans *trans,
477 unsigned dev, s64 sectors)
479 struct bch_replicas_padded r;
481 bch2_replicas_entry_cached(&r.e, dev);
483 update_replicas_list(trans, &r.e, sectors);
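/*
 * Trigger for alloc keys: keeps the in-memory bucket gens, device and
 * filesystem usage in sync with the alloc btree, and wakes up the
 * allocator/discard/invalidate paths as buckets change state.
 */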
486 int bch2_mark_alloc(struct btree_trans *trans,
487 struct bkey_s_c old, struct bkey_s_c new,
490 bool gc = flags & BTREE_TRIGGER_GC;
491 u64 journal_seq = trans->journal_res.seq;
492 struct bch_fs *c = trans->c;
493 struct bch_alloc_v4 old_a, new_a;
498 * alloc btree is read in by bch2_alloc_read, not gc:
500 if ((flags & BTREE_TRIGGER_GC) &&
501 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
504 if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
505 "alloc key for invalid device or bucket"))
508 ca = bch_dev_bkey_exists(c, new.k->p.inode);
510 bch2_alloc_to_v4(old, &old_a);
511 bch2_alloc_to_v4(new, &new_a);
513 if ((flags & BTREE_TRIGGER_INSERT) &&
514 data_type_is_empty(old_a.data_type) !=
515 data_type_is_empty(new_a.data_type) &&
516 new.k->type == KEY_TYPE_alloc_v4) {
517 struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
519 BUG_ON(!journal_seq);
522 * If the btree updates referring to a bucket weren't flushed
523 * before the bucket became empty again, then we don't have
524 * to wait on a journal flush before we can reuse the bucket:
526 new_a.journal_seq = data_type_is_empty(new_a.data_type) &&
527 (journal_seq == v->journal_seq ||
528 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
530 v->journal_seq = new_a.journal_seq;
533 if (!data_type_is_empty(old_a.data_type) &&
534 data_type_is_empty(new_a.data_type) &&
536 ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
537 c->journal.flushed_seq_ondisk,
538 new.k->p.inode, new.k->p.offset,
541 bch2_fs_fatal_error(c,
542 "error setting bucket_needs_journal_commit: %i", ret);
547 percpu_down_read(&c->mark_lock);
548 if (!gc && new_a.gen != old_a.gen)
549 *bucket_gen(ca, new.k->p.offset) = new_a.gen;
551 bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
554 struct bucket *g = gc_bucket(ca, new.k->p.offset);
560 g->data_type = new_a.data_type;
561 g->stripe = new_a.stripe;
562 g->stripe_redundancy = new_a.stripe_redundancy;
563 g->dirty_sectors = new_a.dirty_sectors;
564 g->cached_sectors = new_a.cached_sectors;
568 percpu_up_read(&c->mark_lock);
571 * need to know if we're getting called from the invalidate path or
575 if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
576 old_a.cached_sectors) {
577 ret = update_cached_sectors(c, new, ca->dev_idx,
578 -((s64) old_a.cached_sectors),
581 bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
586 if (new_a.data_type == BCH_DATA_free &&
587 (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
588 closure_wake_up(&c->freelist_wait);
590 if (new_a.data_type == BCH_DATA_need_discard &&
591 (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
594 if (old_a.data_type != BCH_DATA_cached &&
595 new_a.data_type == BCH_DATA_cached &&
596 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
597 bch2_do_invalidates(c);
599 if (new_a.data_type == BCH_DATA_need_gc_gens)
605 int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
606 size_t b, enum bch_data_type data_type,
607 unsigned sectors, struct gc_pos pos,
610 struct bucket old, new, *g;
613 BUG_ON(!(flags & BTREE_TRIGGER_GC));
614 BUG_ON(data_type != BCH_DATA_sb &&
615 data_type != BCH_DATA_journal);
618 * Backup superblock might be past the end of our normal usable space:
620 if (b >= ca->mi.nbuckets)
623 percpu_down_read(&c->mark_lock);
624 g = gc_bucket(ca, b);
629 if (bch2_fs_inconsistent_on(g->data_type &&
630 g->data_type != data_type, c,
631 "different types of data in same bucket: %s, %s",
632 bch2_data_types[g->data_type],
633 bch2_data_types[data_type])) {
638 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
639 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
640 ca->dev_idx, b, g->gen,
641 bch2_data_types[g->data_type ?: data_type],
642 g->dirty_sectors, sectors)) {
648 g->data_type = data_type;
649 g->dirty_sectors += sectors;
654 bch2_dev_usage_update_m(c, ca, old, new, 0, true);
655 percpu_up_read(&c->mark_lock);
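/*
 * check_bucket_ref() validates a pointer against the bucket it points into:
 * pointer gen newer than bucket gen, pointer gen too stale, stale dirty
 * pointers, conflicting data types in one bucket, and sector count
 * overflow. It's shared by the gc and transactional mark paths.
 */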
659 static int check_bucket_ref(struct bch_fs *c,
661 const struct bch_extent_ptr *ptr,
662 s64 sectors, enum bch_data_type ptr_data_type,
663 u8 b_gen, u8 bucket_data_type,
664 u32 dirty_sectors, u32 cached_sectors)
666 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
667 size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
668 u32 bucket_sectors = !ptr->cached
671 struct printbuf buf = PRINTBUF;
674 if (bucket_data_type == BCH_DATA_cached)
675 bucket_data_type = BCH_DATA_user;
677 if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
678 (bucket_data_type == BCH_DATA_user && ptr_data_type == BCH_DATA_stripe))
679 bucket_data_type = ptr_data_type = BCH_DATA_stripe;
681 if (gen_after(ptr->gen, b_gen)) {
682 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
683 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
685 ptr->dev, bucket_nr, b_gen,
686 bch2_data_types[bucket_data_type ?: ptr_data_type],
688 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
693 if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
694 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
695 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
697 ptr->dev, bucket_nr, b_gen,
698 bch2_data_types[bucket_data_type ?: ptr_data_type],
700 (printbuf_reset(&buf),
701 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
706 if (b_gen != ptr->gen && !ptr->cached) {
707 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
708 "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
710 ptr->dev, bucket_nr, b_gen,
711 *bucket_gen(ca, bucket_nr),
712 bch2_data_types[bucket_data_type ?: ptr_data_type],
714 (printbuf_reset(&buf),
715 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
720 if (b_gen != ptr->gen) {
725 if (!data_type_is_empty(bucket_data_type) &&
727 bucket_data_type != ptr_data_type) {
728 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
729 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
731 ptr->dev, bucket_nr, b_gen,
732 bch2_data_types[bucket_data_type],
733 bch2_data_types[ptr_data_type],
734 (printbuf_reset(&buf),
735 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
740 if ((u64) bucket_sectors + sectors > U32_MAX) {
741 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
742 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
744 ptr->dev, bucket_nr, b_gen,
745 bch2_data_types[bucket_data_type ?: ptr_data_type],
746 bucket_sectors, sectors,
747 (printbuf_reset(&buf),
748 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
757 static int mark_stripe_bucket(struct btree_trans *trans,
762 struct bch_fs *c = trans->c;
763 u64 journal_seq = trans->journal_res.seq;
764 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
765 unsigned nr_data = s->nr_blocks - s->nr_redundant;
766 bool parity = ptr_idx >= nr_data;
767 enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
768 s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
769 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
770 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
771 struct bucket old, new, *g;
772 struct printbuf buf = PRINTBUF;
775 BUG_ON(!(flags & BTREE_TRIGGER_GC));
777 /* XXX doesn't handle deletion */
779 percpu_down_read(&c->mark_lock);
781 g = PTR_GC_BUCKET(ca, ptr);
783 if (g->dirty_sectors ||
784 (g->stripe && g->stripe != k.k->p.offset)) {
785 bch2_fs_inconsistent(c,
786 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
787 ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
788 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
796 ret = check_bucket_ref(c, k, ptr, sectors, data_type,
797 g->gen, g->data_type,
798 g->dirty_sectors, g->cached_sectors);
803 g->data_type = data_type;
804 g->dirty_sectors += sectors;
806 g->stripe = k.k->p.offset;
807 g->stripe_redundancy = s->nr_redundant;
812 bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
813 percpu_up_read(&c->mark_lock);
818 static int __mark_pointer(struct btree_trans *trans,
820 const struct bch_extent_ptr *ptr,
821 s64 sectors, enum bch_data_type ptr_data_type,
822 u8 bucket_gen, u8 *bucket_data_type,
823 u32 *dirty_sectors, u32 *cached_sectors)
825 u32 *dst_sectors = !ptr->cached
828 int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
829 bucket_gen, *bucket_data_type,
830 *dirty_sectors, *cached_sectors);
835 *dst_sectors += sectors;
836 *bucket_data_type = *dirty_sectors || *cached_sectors
841 static int bch2_mark_pointer(struct btree_trans *trans,
843 struct extent_ptr_decoded p,
844 s64 sectors, enum bch_data_type data_type,
847 u64 journal_seq = trans->journal_res.seq;
848 struct bch_fs *c = trans->c;
849 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
850 struct bucket old, new, *g;
854 BUG_ON(!(flags & BTREE_TRIGGER_GC));
856 percpu_down_read(&c->mark_lock);
857 g = PTR_GC_BUCKET(ca, &p.ptr);
861 bucket_data_type = g->data_type;
862 ret = __mark_pointer(trans, k, &p.ptr, sectors,
868 g->data_type = bucket_data_type;
873 bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
874 percpu_up_read(&c->mark_lock);
879 static int bch2_mark_stripe_ptr(struct btree_trans *trans,
881 struct bch_extent_stripe_ptr p,
882 enum bch_data_type data_type,
886 struct bch_fs *c = trans->c;
887 struct bch_replicas_padded r;
890 BUG_ON(!(flags & BTREE_TRIGGER_GC));
892 m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
894 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
899 spin_lock(&c->ec_stripes_heap_lock);
901 if (!m || !m->alive) {
902 spin_unlock(&c->ec_stripes_heap_lock);
903 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
905 bch2_inconsistent_error(c);
909 m->block_sectors[p.block] += sectors;
912 spin_unlock(&c->ec_stripes_heap_lock);
914 r.e.data_type = data_type;
915 update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
920 int bch2_mark_extent(struct btree_trans *trans,
921 struct bkey_s_c old, struct bkey_s_c new,
924 u64 journal_seq = trans->journal_res.seq;
925 struct bch_fs *c = trans->c;
926 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
927 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
928 const union bch_extent_entry *entry;
929 struct extent_ptr_decoded p;
930 struct bch_replicas_padded r;
931 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
934 s64 sectors = bkey_is_btree_ptr(k.k)
937 s64 dirty_sectors = 0;
941 BUG_ON(!(flags & BTREE_TRIGGER_GC));
943 r.e.data_type = data_type;
947 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
948 s64 disk_sectors = ptr_disk_sectors(sectors, p);
950 if (flags & BTREE_TRIGGER_OVERWRITE)
951 disk_sectors = -disk_sectors;
953 ret = bch2_mark_pointer(trans, k, p, disk_sectors,
962 ret = update_cached_sectors(c, k, p.ptr.dev,
963 disk_sectors, journal_seq, true);
965 bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
969 } else if (!p.has_ec) {
970 dirty_sectors += disk_sectors;
971 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
973 ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
974 disk_sectors, flags);
979 * There may be other dirty pointers in this extent, but
980 * if so they're not required for mounting if we have an
981 * erasure coded pointer in this extent:
988 ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
990 struct printbuf buf = PRINTBUF;
992 bch2_bkey_val_to_text(&buf, c, k);
993 bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
1002 int bch2_mark_stripe(struct btree_trans *trans,
1003 struct bkey_s_c old, struct bkey_s_c new,
1006 bool gc = flags & BTREE_TRIGGER_GC;
1007 u64 journal_seq = trans->journal_res.seq;
1008 struct bch_fs *c = trans->c;
1009 u64 idx = new.k->p.offset;
1010 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1011 ? bkey_s_c_to_stripe(old).v : NULL;
1012 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1013 ? bkey_s_c_to_stripe(new).v : NULL;
1017 BUG_ON(gc && old_s);
1020 struct stripe *m = genradix_ptr(&c->stripes, idx);
1022 if (!m || (old_s && !m->alive)) {
1023 struct printbuf buf1 = PRINTBUF;
1024 struct printbuf buf2 = PRINTBUF;
1026 bch2_bkey_val_to_text(&buf1, c, old);
1027 bch2_bkey_val_to_text(&buf2, c, new);
1028 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
1030 "new %s", idx, buf1.buf, buf2.buf);
1031 printbuf_exit(&buf2);
1032 printbuf_exit(&buf1);
1033 bch2_inconsistent_error(c);
1038 spin_lock(&c->ec_stripes_heap_lock);
1039 bch2_stripes_heap_del(c, m, idx);
1040 spin_unlock(&c->ec_stripes_heap_lock);
1042 memset(m, 0, sizeof(*m));
1045 m->sectors = le16_to_cpu(new_s->sectors);
1046 m->algorithm = new_s->algorithm;
1047 m->nr_blocks = new_s->nr_blocks;
1048 m->nr_redundant = new_s->nr_redundant;
1049 m->blocks_nonempty = 0;
1051 for (i = 0; i < new_s->nr_blocks; i++)
1052 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
1054 spin_lock(&c->ec_stripes_heap_lock);
1055 bch2_stripes_heap_update(c, m, idx);
1056 spin_unlock(&c->ec_stripes_heap_lock);
1059 struct gc_stripe *m =
1060 genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
1063 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
1068 * This will be wrong when we bring back runtime gc: we should
1069 * be unmarking the old key and then marking the new key
1072 m->sectors = le16_to_cpu(new_s->sectors);
1073 m->nr_blocks = new_s->nr_blocks;
1074 m->nr_redundant = new_s->nr_redundant;
1076 for (i = 0; i < new_s->nr_blocks; i++)
1077 m->ptrs[i] = new_s->ptrs[i];
1079 bch2_bkey_to_replicas(&m->r.e, new);
1082 * gc recalculates this field from stripe ptr
1085 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1087 for (i = 0; i < new_s->nr_blocks; i++) {
1088 ret = mark_stripe_bucket(trans, new, i, flags);
1093 ret = update_replicas(c, new, &m->r.e,
1094 ((s64) m->sectors * m->nr_redundant),
1097 struct printbuf buf = PRINTBUF;
1099 bch2_bkey_val_to_text(&buf, c, new);
1100 bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
1101 printbuf_exit(&buf);
1109 int bch2_mark_inode(struct btree_trans *trans,
1110 struct bkey_s_c old, struct bkey_s_c new,
1113 struct bch_fs *c = trans->c;
1114 struct bch_fs_usage *fs_usage;
1115 u64 journal_seq = trans->journal_res.seq;
1117 if (flags & BTREE_TRIGGER_INSERT) {
1118 struct bch_inode_v2 *v = (struct bch_inode_v2 *) new.v;
1120 BUG_ON(!journal_seq);
1121 BUG_ON(new.k->type != KEY_TYPE_inode_v2);
1123 v->bi_journal_seq = cpu_to_le64(journal_seq);
1126 if (flags & BTREE_TRIGGER_GC) {
1127 percpu_down_read(&c->mark_lock);
1130 fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
1131 fs_usage->nr_inodes += bkey_is_inode(new.k);
1132 fs_usage->nr_inodes -= bkey_is_inode(old.k);
1135 percpu_up_read(&c->mark_lock);
1140 int bch2_mark_reservation(struct btree_trans *trans,
1141 struct bkey_s_c old, struct bkey_s_c new,
1144 struct bch_fs *c = trans->c;
1145 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
1146 struct bch_fs_usage *fs_usage;
1147 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1148 s64 sectors = (s64) k.k->size;
1150 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1152 if (flags & BTREE_TRIGGER_OVERWRITE)
1154 sectors *= replicas;
1156 percpu_down_read(&c->mark_lock);
1159 fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
1160 replicas = clamp_t(unsigned, replicas, 1,
1161 ARRAY_SIZE(fs_usage->persistent_reserved));
1163 fs_usage->reserved += sectors;
1164 fs_usage->persistent_reserved[replicas - 1] += sectors;
1167 percpu_up_read(&c->mark_lock);
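/*
 * Reflink pointers are marked against the reflink_gc_table, which holds one
 * refcount per indirect extent seen by gc: each reflink_p covers a range of
 * the reflink btree, and any part of that range with no matching entry is
 * reported and, if fsck_err() says to fix it, the corresponding part of the
 * reflink pointer is replaced with a KEY_TYPE_error extent.
 */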
1172 static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
1173 struct bkey_s_c_reflink_p p,
1175 u64 *idx, unsigned flags, size_t r_idx)
1177 struct bch_fs *c = trans->c;
1178 struct reflink_gc *r;
1179 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1182 struct printbuf buf = PRINTBUF;
1184 if (r_idx >= c->reflink_gc_nr)
1187 r = genradix_ptr(&c->reflink_gc_table, r_idx);
1188 next_idx = min(next_idx, r->offset - r->size);
1189 if (*idx < next_idx)
1192 BUG_ON((s64) r->refcount + add < 0);
1198 if (fsck_err(c, "pointer to missing indirect extent\n"
1200 " missing range %llu-%llu",
1201 (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
1203 struct bkey_i_error new;
1206 new.k.type = KEY_TYPE_error;
1207 new.k.p = bkey_start_pos(p.k);
1208 new.k.p.offset += *idx - start;
1209 bch2_key_resize(&new.k, next_idx - *idx);
1210 ret = __bch2_btree_insert(trans, BTREE_ID_extents, &new.k_i);
1215 printbuf_exit(&buf);
1219 int bch2_mark_reflink_p(struct btree_trans *trans,
1220 struct bkey_s_c old, struct bkey_s_c new,
1223 struct bch_fs *c = trans->c;
1224 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
1225 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1226 struct reflink_gc *ref;
1228 u64 idx = le64_to_cpu(p.v->idx), start = idx;
1229 u64 end = le64_to_cpu(p.v->idx) + p.k->size;
1232 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1234 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
1235 idx -= le32_to_cpu(p.v->front_pad);
1236 end += le32_to_cpu(p.v->back_pad);
1240 r = c->reflink_gc_nr;
1242 m = l + (r - l) / 2;
1244 ref = genradix_ptr(&c->reflink_gc_table, m);
1245 if (ref->offset <= idx)
1251 while (idx < end && !ret)
1252 ret = __bch2_mark_reflink_p(trans, p, start, end,
1258 static noinline __cold
1259 void fs_usage_apply_warn(struct btree_trans *trans,
1260 unsigned disk_res_sectors,
1261 s64 should_not_have_added)
1263 struct bch_fs *c = trans->c;
1264 struct btree_insert_entry *i;
1265 struct printbuf buf = PRINTBUF;
1267 bch_err(c, "disk usage increased %lli more than %u sectors reserved",
1268 should_not_have_added, disk_res_sectors);
1270 trans_for_each_update(trans, i) {
1271 struct bkey_s_c old = { &i->old_k, i->old_v };
1273 pr_err("while inserting");
1274 printbuf_reset(&buf);
1275 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
1276 pr_err(" %s", buf.buf);
1277 pr_err("overlapping with");
1278 printbuf_reset(&buf);
1279 bch2_bkey_val_to_text(&buf, c, old);
1280 pr_err(" %s", buf.buf);
1284 printbuf_exit(&buf);
1287 int bch2_trans_fs_usage_apply(struct btree_trans *trans,
1288 struct replicas_delta_list *deltas)
1290 struct bch_fs *c = trans->c;
1291 static int warned_disk_usage = 0;
1293 unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1294 struct replicas_delta *d = deltas->d, *d2;
1295 struct replicas_delta *top = (void *) deltas->d + deltas->used;
1296 struct bch_fs_usage *dst;
1297 s64 added = 0, should_not_have_added;
1300 percpu_down_read(&c->mark_lock);
1302 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1304 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1305 switch (d->r.data_type) {
1306 case BCH_DATA_btree:
1308 case BCH_DATA_parity:
1312 if (__update_replicas(c, dst, &d->r, d->delta))
1316 dst->nr_inodes += deltas->nr_inodes;
1318 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1319 added += deltas->persistent_reserved[i];
1320 dst->reserved += deltas->persistent_reserved[i];
1321 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
1325 * Not allowed to reduce sectors_available except by getting a
1328 should_not_have_added = added - (s64) disk_res_sectors;
1329 if (unlikely(should_not_have_added > 0)) {
1330 u64 old, new, v = atomic64_read(&c->sectors_available);
1334 new = max_t(s64, 0, old - should_not_have_added);
1335 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1338 added -= should_not_have_added;
1343 trans->disk_res->sectors -= added;
1344 this_cpu_sub(*c->online_reserved, added);
1348 percpu_up_read(&c->mark_lock);
1350 if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1351 fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
1354 /* revert changes: */
1355 for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
1356 BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
1359 percpu_up_read(&c->mark_lock);
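/*
 * Transactional (pre-commit) triggers: these update the alloc btree via
 * btree iterators instead of touching in-memory state. For dirty extent
 * pointers this also maintains backpointers: one is added when a pointer is
 * inserted and deleted when it's overwritten.
 */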
1365 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1366 enum btree_id btree_id, unsigned level,
1367 struct bkey_s_c k, struct extent_ptr_decoded p,
1370 bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
1371 struct btree_iter iter;
1372 struct bkey_i_alloc_v4 *a;
1373 struct bpos bucket_pos;
1374 struct bch_backpointer bp;
1378 bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket_pos, &bp);
1379 sectors = bp.bucket_len;
1383 a = bch2_trans_start_alloc_update(trans, &iter, bucket_pos);
1387 ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
1388 a->v.gen, &a->v.data_type,
1389 &a->v.dirty_sectors, &a->v.cached_sectors);
1393 if (!p.ptr.cached) {
1395 ? bch2_bucket_backpointer_add(trans, a, bp, k)
1396 : bch2_bucket_backpointer_del(trans, a, bp, k);
1401 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1403 bch2_trans_iter_exit(trans, &iter);
1407 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1408 struct extent_ptr_decoded p,
1409 s64 sectors, enum bch_data_type data_type)
1411 struct btree_iter iter;
1413 struct bkey_i_stripe *s;
1414 struct bch_replicas_padded r;
1417 bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
1419 BTREE_ITER_WITH_UPDATES);
1420 k = bch2_btree_iter_peek_slot(&iter);
1425 if (k.k->type != KEY_TYPE_stripe) {
1426 bch2_trans_inconsistent(trans,
1427 "pointer to nonexistent stripe %llu",
1433 if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1434 bch2_trans_inconsistent(trans,
1435 "stripe pointer doesn't match stripe %llu",
1441 s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1442 ret = PTR_ERR_OR_ZERO(s);
1446 bkey_reassemble(&s->k_i, k);
1447 stripe_blockcount_set(&s->v, p.ec.block,
1448 stripe_blockcount_get(&s->v, p.ec.block) +
1451 ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
1455 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1456 r.e.data_type = data_type;
1457 update_replicas_list(trans, &r.e, sectors);
1459 bch2_trans_iter_exit(trans, &iter);
1463 int bch2_trans_mark_extent(struct btree_trans *trans,
1464 enum btree_id btree_id, unsigned level,
1465 struct bkey_s_c old, struct bkey_i *new,
1468 struct bch_fs *c = trans->c;
1469 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
1471 : bkey_i_to_s_c(new);
1472 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1473 const union bch_extent_entry *entry;
1474 struct extent_ptr_decoded p;
1475 struct bch_replicas_padded r;
1476 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
1479 s64 sectors = bkey_is_btree_ptr(k.k)
1482 s64 dirty_sectors = 0;
1486 r.e.data_type = data_type;
1488 r.e.nr_required = 1;
1490 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1491 s64 disk_sectors = ptr_disk_sectors(sectors, p);
1493 if (flags & BTREE_TRIGGER_OVERWRITE)
1494 disk_sectors = -disk_sectors;
1496 ret = bch2_trans_mark_pointer(trans, btree_id, level, k, p, flags);
1504 update_cached_sectors_list(trans, p.ptr.dev,
1506 } else if (!p.has_ec) {
1507 dirty_sectors += disk_sectors;
1508 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1510 ret = bch2_trans_mark_stripe_ptr(trans, p,
1511 disk_sectors, data_type);
1515 r.e.nr_required = 0;
1520 update_replicas_list(trans, &r.e, dirty_sectors);
1525 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
1526 struct bkey_s_c_stripe s,
1527 unsigned idx, bool deleting)
1529 struct bch_fs *c = trans->c;
1530 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1531 struct btree_iter iter;
1532 struct bkey_i_alloc_v4 *a;
1533 enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
1534 ? BCH_DATA_parity : 0;
1535 s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
1541 a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
1545 ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
1546 a->v.gen, a->v.data_type,
1547 a->v.dirty_sectors, a->v.cached_sectors);
1552 if (bch2_trans_inconsistent_on(a->v.stripe ||
1553 a->v.stripe_redundancy, trans,
1554 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
1555 iter.pos.inode, iter.pos.offset, a->v.gen,
1556 bch2_data_types[a->v.data_type],
1558 a->v.stripe, s.k->p.offset)) {
1563 if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
1564 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
1565 iter.pos.inode, iter.pos.offset, a->v.gen,
1566 bch2_data_types[a->v.data_type],
1573 a->v.stripe = s.k->p.offset;
1574 a->v.stripe_redundancy = s.v->nr_redundant;
1576 if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
1577 a->v.stripe_redundancy != s.v->nr_redundant, trans,
1578 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
1579 iter.pos.inode, iter.pos.offset, a->v.gen,
1580 s.k->p.offset, a->v.stripe)) {
1586 a->v.stripe_redundancy = 0;
1589 a->v.dirty_sectors += sectors;
1591 a->v.data_type = !deleting ? data_type : 0;
1593 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1597 bch2_trans_iter_exit(trans, &iter);
1601 int bch2_trans_mark_stripe(struct btree_trans *trans,
1602 enum btree_id btree_id, unsigned level,
1603 struct bkey_s_c old, struct bkey_i *new,
1606 const struct bch_stripe *old_s = NULL;
1607 struct bch_stripe *new_s = NULL;
1608 struct bch_replicas_padded r;
1609 unsigned i, nr_blocks;
1612 if (old.k->type == KEY_TYPE_stripe)
1613 old_s = bkey_s_c_to_stripe(old).v;
1614 if (new->k.type == KEY_TYPE_stripe)
1615 new_s = &bkey_i_to_stripe(new)->v;
1618 * If the pointers aren't changing, we don't need to do anything:
1620 if (new_s && old_s &&
1621 new_s->nr_blocks == old_s->nr_blocks &&
1622 new_s->nr_redundant == old_s->nr_redundant &&
1623 !memcmp(old_s->ptrs, new_s->ptrs,
1624 new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
1627 BUG_ON(new_s && old_s &&
1628 (new_s->nr_blocks != old_s->nr_blocks ||
1629 new_s->nr_redundant != old_s->nr_redundant));
1631 nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
1634 s64 sectors = le16_to_cpu(new_s->sectors);
1636 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
1637 update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
1641 s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
1643 bch2_bkey_to_replicas(&r.e, old);
1644 update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
1647 for (i = 0; i < nr_blocks; i++) {
1648 if (new_s && old_s &&
1649 !memcmp(&new_s->ptrs[i],
1651 sizeof(new_s->ptrs[i])))
1655 ret = bch2_trans_mark_stripe_bucket(trans,
1656 bkey_i_to_s_c_stripe(new), i, false);
1662 ret = bch2_trans_mark_stripe_bucket(trans,
1663 bkey_s_c_to_stripe(old), i, true);
1672 int bch2_trans_mark_inode(struct btree_trans *trans,
1673 enum btree_id btree_id, unsigned level,
1674 struct bkey_s_c old,
1678 int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
1681 struct replicas_delta_list *d =
1682 replicas_deltas_realloc(trans, 0);
1689 int bch2_trans_mark_reservation(struct btree_trans *trans,
1690 enum btree_id btree_id, unsigned level,
1691 struct bkey_s_c old,
1695 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
1697 : bkey_i_to_s_c(new);
1698 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1699 s64 sectors = (s64) k.k->size;
1700 struct replicas_delta_list *d;
1702 if (flags & BTREE_TRIGGER_OVERWRITE)
1704 sectors *= replicas;
1706 d = replicas_deltas_realloc(trans, 0);
1708 replicas = clamp_t(unsigned, replicas, 1,
1709 ARRAY_SIZE(d->persistent_reserved));
1711 d->persistent_reserved[replicas - 1] += sectors;
1715 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1716 struct bkey_s_c_reflink_p p,
1717 u64 *idx, unsigned flags)
1719 struct bch_fs *c = trans->c;
1720 struct btree_iter iter;
1724 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1725 struct printbuf buf = PRINTBUF;
1728 bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
1730 BTREE_ITER_WITH_UPDATES);
1731 k = bch2_btree_iter_peek_slot(&iter);
1736 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1737 ret = PTR_ERR_OR_ZERO(n);
1741 bkey_reassemble(n, k);
1743 refcount = bkey_refcount(n);
1745 bch2_bkey_val_to_text(&buf, c, p.s_c);
1746 bch2_trans_inconsistent(trans,
1747 "nonexistent indirect extent at %llu while marking\n %s",
1753 if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
1754 bch2_bkey_val_to_text(&buf, c, p.s_c);
1755 bch2_trans_inconsistent(trans,
1756 "indirect extent refcount underflow at %llu while marking\n %s",
1762 if (flags & BTREE_TRIGGER_INSERT) {
1763 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1766 pad = max_t(s64, le32_to_cpu(v->front_pad),
1767 le64_to_cpu(v->idx) - bkey_start_offset(k.k));
1768 BUG_ON(pad > U32_MAX);
1769 v->front_pad = cpu_to_le32(pad);
1771 pad = max_t(s64, le32_to_cpu(v->back_pad),
1772 k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
1773 BUG_ON(pad > U32_MAX);
1774 v->back_pad = cpu_to_le32(pad);
1777 le64_add_cpu(refcount, add);
1779 bch2_btree_iter_set_pos_to_extent_start(&iter);
1780 ret = bch2_trans_update(trans, &iter, n, 0);
1784 *idx = k.k->p.offset;
1786 bch2_trans_iter_exit(trans, &iter);
1787 printbuf_exit(&buf);
1791 int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1792 enum btree_id btree_id, unsigned level,
1793 struct bkey_s_c old,
1797 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
1799 : bkey_i_to_s_c(new);
1800 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1804 if (flags & BTREE_TRIGGER_INSERT) {
1805 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1807 v->front_pad = v->back_pad = 0;
1810 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
1811 end_idx = le64_to_cpu(p.v->idx) + p.k->size +
1812 le32_to_cpu(p.v->back_pad);
1814 while (idx < end_idx && !ret)
1815 ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
1820 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1821 struct bch_dev *ca, size_t b,
1822 enum bch_data_type type,
1825 struct bch_fs *c = trans->c;
1826 struct btree_iter iter;
1827 struct bkey_i_alloc_v4 *a;
1831 * Backup superblock might be past the end of our normal usable space:
1833 if (b >= ca->mi.nbuckets)
1836 a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
1840 if (a->v.data_type && a->v.data_type != type) {
1841 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1842 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
1844 iter.pos.inode, iter.pos.offset, a->v.gen,
1845 bch2_data_types[a->v.data_type],
1846 bch2_data_types[type],
1847 bch2_data_types[type]);
1852 a->v.data_type = type;
1853 a->v.dirty_sectors = sectors;
1855 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1859 bch2_trans_iter_exit(trans, &iter);
1863 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1864 struct bch_dev *ca, size_t b,
1865 enum bch_data_type type,
1868 return commit_do(trans, NULL, NULL, 0,
1869 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1872 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
1875 enum bch_data_type type,
1876 u64 *bucket, unsigned *bucket_sectors)
1879 u64 b = sector_to_bucket(ca, start);
1881 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1883 if (b != *bucket && *bucket_sectors) {
1884 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
1885 type, *bucket_sectors);
1889 *bucket_sectors = 0;
1893 *bucket_sectors += sectors;
1895 } while (start < end);
1900 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
1903 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1905 unsigned i, bucket_sectors = 0;
1908 for (i = 0; i < layout->nr_superblocks; i++) {
1909 u64 offset = le64_to_cpu(layout->sb_offset[i]);
1911 if (offset == BCH_SB_SECTOR) {
1912 ret = bch2_trans_mark_metadata_sectors(trans, ca,
1914 BCH_DATA_sb, &bucket, &bucket_sectors);
1919 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1920 offset + (1 << layout->sb_max_size_bits),
1921 BCH_DATA_sb, &bucket, &bucket_sectors);
1926 if (bucket_sectors) {
1927 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1928 bucket, BCH_DATA_sb, bucket_sectors);
1933 for (i = 0; i < ca->journal.nr; i++) {
1934 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1935 ca->journal.buckets[i],
1936 BCH_DATA_journal, ca->mi.bucket_size);
1944 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
1946 return bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
1949 /* Disk reservations: */
1951 #define SECTORS_CACHE 1024
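/*
 * Fast path: each CPU caches up to SECTORS_CACHE sectors taken from the
 * global c->sectors_available counter, so most reservations only touch the
 * local percpu counter. When the global counter can't cover the request we
 * fall back to the slow path under sectors_available_lock and recompute
 * what's actually available.
 */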
1953 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1954 u64 sectors, int flags)
1956 struct bch_fs_pcpu *pcpu;
1958 s64 sectors_available;
1961 percpu_down_read(&c->mark_lock);
1963 pcpu = this_cpu_ptr(c->pcpu);
1965 if (sectors <= pcpu->sectors_available)
1968 v = atomic64_read(&c->sectors_available);
1971 get = min((u64) sectors + SECTORS_CACHE, old);
1973 if (get < sectors) {
1977 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1978 old, old - get)) != old);
1980 pcpu->sectors_available += get;
1983 pcpu->sectors_available -= sectors;
1984 this_cpu_add(*c->online_reserved, sectors);
1985 res->sectors += sectors;
1988 percpu_up_read(&c->mark_lock);
1992 mutex_lock(&c->sectors_available_lock);
1994 percpu_u64_set(&c->pcpu->sectors_available, 0);
1995 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
1997 if (sectors <= sectors_available ||
1998 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1999 atomic64_set(&c->sectors_available,
2000 max_t(s64, 0, sectors_available - sectors));
2001 this_cpu_add(*c->online_reserved, sectors);
2002 res->sectors += sectors;
2005 atomic64_set(&c->sectors_available, sectors_available);
2006 ret = -BCH_ERR_ENOSPC_disk_reservation;
2009 mutex_unlock(&c->sectors_available_lock);
2010 percpu_up_read(&c->mark_lock);
2015 /* Startup/shutdown: */
2017 static void bucket_gens_free_rcu(struct rcu_head *rcu)
2019 struct bucket_gens *buckets =
2020 container_of(rcu, struct bucket_gens, rcu);
2022 kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
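/*
 * The bucket_gens array is accessed under RCU by fast paths, so a resize
 * allocates a new array, copies the old contents with mark_lock held for
 * write, publishes it with rcu_assign_pointer() and frees the old array via
 * call_rcu().
 */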
2025 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2027 struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
2028 unsigned long *buckets_nouse = NULL;
2029 bool resize = ca->bucket_gens != NULL;
2032 if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
2033 GFP_KERNEL|__GFP_ZERO)) ||
2034 (c->opts.buckets_nouse &&
2035 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2036 sizeof(unsigned long),
2037 GFP_KERNEL|__GFP_ZERO))))
2040 bucket_gens->first_bucket = ca->mi.first_bucket;
2041 bucket_gens->nbuckets = nbuckets;
2043 bch2_copygc_stop(c);
2046 down_write(&c->gc_lock);
2047 down_write(&ca->bucket_lock);
2048 percpu_down_write(&c->mark_lock);
2051 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
2054 size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
2056 memcpy(bucket_gens->b,
2060 memcpy(buckets_nouse,
2062 BITS_TO_LONGS(n) * sizeof(unsigned long));
2065 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
2066 bucket_gens = old_bucket_gens;
2068 swap(ca->buckets_nouse, buckets_nouse);
2070 nbuckets = ca->mi.nbuckets;
2073 percpu_up_write(&c->mark_lock);
2074 up_write(&ca->bucket_lock);
2075 up_write(&c->gc_lock);
2080 kvpfree(buckets_nouse,
2081 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2083 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
2088 void bch2_dev_buckets_free(struct bch_dev *ca)
2092 kvpfree(ca->buckets_nouse,
2093 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2094 kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
2095 sizeof(struct bucket_gens) + ca->mi.nbuckets);
2097 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2098 free_percpu(ca->usage[i]);
2099 kfree(ca->usage_base);
2102 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2106 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2107 if (!ca->usage_base)
2110 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2111 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2116 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);