1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
9 #include "alloc_background.h"
10 #include "backpointers.h"
13 #include "btree_update.h"
15 #include "buckets_waiting_for_journal.h"
23 #include "subvolume.h"
26 #include <linux/preempt.h>
28 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
29 enum bch_data_type data_type,
34 fs_usage->btree += sectors;
38 fs_usage->data += sectors;
41 fs_usage->cached += sectors;
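/*
 * Fold every percpu/journal-buf usage accumulator into usage_base, then
 * rederive the summary fields (reserved, per-data-type totals, hidden
 * sectors for superblock/journal buckets) from it. Runs with mark_lock held
 * for write, so readers never see a half-initialized base.
 */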
48 void bch2_fs_usage_initialize(struct bch_fs *c)
50 struct bch_fs_usage *usage;
54 percpu_down_write(&c->mark_lock);
55 usage = c->usage_base;
57 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
58 bch2_fs_usage_acc_to_base(c, i);
60 for (i = 0; i < BCH_REPLICAS_MAX; i++)
61 usage->reserved += usage->persistent_reserved[i];
63 for (i = 0; i < c->replicas.nr; i++) {
64 struct bch_replicas_entry_v1 *e =
65 cpu_replicas_entry(&c->replicas, i);
67 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
70 for_each_member_device(ca, c, i) {
71 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
73 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
74 dev.d[BCH_DATA_journal].buckets) *
78 percpu_up_write(&c->mark_lock);
81 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
85 BUG_ON(!gc && !journal_seq);
87 return this_cpu_ptr(gc
89 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
92 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
94 struct bch_fs *c = ca->fs;
95 unsigned seq, i, u64s = dev_usage_u64s();
98 seq = read_seqcount_begin(&c->usage_lock);
99 memcpy(usage, ca->usage_base, u64s * sizeof(u64));
100 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
101 acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
102 } while (read_seqcount_retry(&c->usage_lock, seq));
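/*
 * Usage reads are lockless: copy usage_base plus every percpu accumulator
 * inside a seqcount read section and retry if bch2_fs_usage_acc_to_base()
 * wrote concurrently. A minimal sketch of the pattern used throughout this
 * file:
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqcount_begin(&c->usage_lock);
 *		... copy the protected counters ...
 *	} while (read_seqcount_retry(&c->usage_lock, seq));
 */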
105 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
107 ssize_t offset = v - (u64 *) c->usage_base;
111 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
112 percpu_rwsem_assert_held(&c->mark_lock);
115 seq = read_seqcount_begin(&c->usage_lock);
118 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
119 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
120 } while (read_seqcount_retry(&c->usage_lock, seq));
125 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
127 struct bch_fs_usage_online *ret;
128 unsigned nr_replicas = READ_ONCE(c->replicas.nr);
131 ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
135 percpu_down_read(&c->mark_lock);
137 if (nr_replicas != c->replicas.nr) {
138 nr_replicas = c->replicas.nr;
139 percpu_up_read(&c->mark_lock);
144 ret->online_reserved = percpu_u64_get(c->online_reserved);
147 seq = read_seqcount_begin(&c->usage_lock);
148 unsafe_memcpy(&ret->u, c->usage_base,
149 __fs_usage_u64s(nr_replicas) * sizeof(u64),
150 "embedded variable length struct");
151 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
152 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
153 __fs_usage_u64s(nr_replicas));
154 } while (read_seqcount_retry(&c->usage_lock, seq));
159 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
162 unsigned i, u64s = fs_usage_u64s(c);
164 BUG_ON(idx >= ARRAY_SIZE(c->usage));
167 write_seqcount_begin(&c->usage_lock);
169 acc_u64s_percpu((u64 *) c->usage_base,
170 (u64 __percpu *) c->usage[idx], u64s);
171 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
174 for_each_member_device_rcu(ca, c, i, NULL) {
175 u64s = dev_usage_u64s();
177 acc_u64s_percpu((u64 *) ca->usage_base,
178 (u64 __percpu *) ca->usage[idx], u64s);
179 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
183 write_seqcount_end(&c->usage_lock);
187 void bch2_fs_usage_to_text(struct printbuf *out,
189 struct bch_fs_usage_online *fs_usage)
193 prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
195 prt_printf(out, "hidden:\t\t\t\t%llu\n",
197 prt_printf(out, "data:\t\t\t\t%llu\n",
199 prt_printf(out, "cached:\t\t\t\t%llu\n",
201 prt_printf(out, "reserved:\t\t\t%llu\n",
202 fs_usage->u.reserved);
203 prt_printf(out, "nr_inodes:\t\t\t%llu\n",
204 fs_usage->u.nr_inodes);
205 prt_printf(out, "online reserved:\t\t%llu\n",
206 fs_usage->online_reserved);
209 i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
211 prt_printf(out, "%u replicas:\n", i + 1);
212 prt_printf(out, "\treserved:\t\t%llu\n",
213 fs_usage->u.persistent_reserved[i]);
216 for (i = 0; i < c->replicas.nr; i++) {
217 struct bch_replicas_entry_v1 *e =
218 cpu_replicas_entry(&c->replicas, i);
220 prt_printf(out, "\t");
221 bch2_replicas_entry_to_text(out, e);
222 prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
226 static u64 reserve_factor(u64 r)
228 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
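/*
 * reserve_factor() pads a reservation by roughly 2^-RESERVE_FACTOR of its
 * size. Worked example, assuming RESERVE_FACTOR is 6:
 * reserve_factor(1000) = 1000 + (round_up(1000, 64) >> 6)
 *                      = 1000 + (1024 >> 6) = 1016,
 * i.e. about 1.6% of headroom on top of the requested sectors.
 */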
231 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
233 return min(fs_usage->u.hidden +
236 reserve_factor(fs_usage->u.reserved +
237 fs_usage->online_reserved),
241 static struct bch_fs_usage_short
242 __bch2_fs_usage_read_short(struct bch_fs *c)
244 struct bch_fs_usage_short ret;
247 ret.capacity = c->capacity -
248 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
250 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
251 bch2_fs_usage_read_one(c, &c->usage_base->btree);
252 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
253 percpu_u64_get(c->online_reserved);
255 ret.used = min(ret.capacity, data + reserve_factor(reserved));
256 ret.free = ret.capacity - ret.used;
258 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
263 struct bch_fs_usage_short
264 bch2_fs_usage_read_short(struct bch_fs *c)
266 struct bch_fs_usage_short ret;
268 percpu_down_read(&c->mark_lock);
269 ret = __bch2_fs_usage_read_short(c);
270 percpu_up_read(&c->mark_lock);
275 void bch2_dev_usage_init(struct bch_dev *ca)
277 ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
280 void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
283 prt_str(out, "buckets");
285 prt_str(out, "sectors");
287 prt_str(out, "fragmented");
291 for (unsigned i = 0; i < BCH_DATA_NR; i++) {
292 prt_str(out, bch2_data_types[i]);
294 prt_u64(out, usage->d[i].buckets);
296 prt_u64(out, usage->d[i].sectors);
298 prt_u64(out, usage->d[i].fragmented);
304 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
305 struct bch_alloc_v4 old,
306 struct bch_alloc_v4 new,
307 u64 journal_seq, bool gc)
309 struct bch_fs_usage *fs_usage;
310 struct bch_dev_usage *u;
313 fs_usage = fs_usage_ptr(c, journal_seq, gc);
315 if (data_type_is_hidden(old.data_type))
316 fs_usage->hidden -= ca->mi.bucket_size;
317 if (data_type_is_hidden(new.data_type))
318 fs_usage->hidden += ca->mi.bucket_size;
320 u = dev_usage_ptr(ca, journal_seq, gc);
322 u->d[old.data_type].buckets--;
323 u->d[new.data_type].buckets++;
325 u->d[old.data_type].sectors -= bch2_bucket_sectors_dirty(old);
326 u->d[new.data_type].sectors += bch2_bucket_sectors_dirty(new);
328 u->d[BCH_DATA_cached].sectors += new.cached_sectors;
329 u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
331 u->d[old.data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, old);
332 u->d[new.data_type].fragmented += bch2_bucket_sectors_fragmented(ca, new);
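/*
 * GC tracks buckets as struct bucket rather than alloc keys; converting to
 * bch_alloc_v4 lets the in-memory paths below feed the same
 * bch2_dev_usage_update() accounting as the btree trigger paths.
 */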
337 static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
339 return (struct bch_alloc_v4) {
341 .data_type = b.data_type,
342 .dirty_sectors = b.dirty_sectors,
343 .cached_sectors = b.cached_sectors,
348 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
349 struct bucket old, struct bucket new)
351 bch2_dev_usage_update(c, ca,
352 bucket_m_to_alloc(old),
353 bucket_m_to_alloc(new),
357 static inline int __update_replicas(struct bch_fs *c,
358 struct bch_fs_usage *fs_usage,
359 struct bch_replicas_entry_v1 *r,
362 int idx = bch2_replicas_entry_idx(c, r);
367 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
368 fs_usage->replicas[idx] += sectors;
372 static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
373 struct bch_replicas_entry_v1 *r, s64 sectors,
374 unsigned journal_seq, bool gc)
376 struct bch_fs_usage *fs_usage;
378 struct printbuf buf = PRINTBUF;
380 percpu_down_read(&c->mark_lock);
382 idx = bch2_replicas_entry_idx(c, r);
384 fsck_err(c, ptr_to_missing_replicas_entry,
385 "no replicas entry\n while marking %s",
386 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
387 percpu_up_read(&c->mark_lock);
388 ret = bch2_mark_replicas(c, r);
389 percpu_down_read(&c->mark_lock);
393 idx = bch2_replicas_entry_idx(c, r);
401 fs_usage = fs_usage_ptr(c, journal_seq, gc);
402 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
403 fs_usage->replicas[idx] += sectors;
407 percpu_up_read(&c->mark_lock);
412 static inline int update_cached_sectors(struct bch_fs *c,
414 unsigned dev, s64 sectors,
415 unsigned journal_seq, bool gc)
417 struct bch_replicas_padded r;
419 bch2_replicas_entry_cached(&r.e, dev);
421 return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
424 static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
427 struct replicas_delta_list *d = trans->fs_usage_deltas;
428 unsigned new_size = d ? (d->size + more) * 2 : 128;
429 unsigned alloc_size = sizeof(*d) + new_size;
431 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
433 if (!d || d->used + more > d->size) {
434 d = krealloc(d, alloc_size, gfp|__GFP_ZERO);
437 if (alloc_size > REPLICAS_DELTA_LIST_MAX)
440 d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
444 memset(d, 0, REPLICAS_DELTA_LIST_MAX);
446 if (trans->fs_usage_deltas)
447 memcpy(d, trans->fs_usage_deltas,
448 trans->fs_usage_deltas->size + sizeof(*d));
450 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
451 kfree(trans->fs_usage_deltas);
455 trans->fs_usage_deltas = d;
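/*
 * The delta list grows geometrically ((size + more) * 2, starting at 128
 * bytes) so repeated appends stay amortized O(1); if krealloc fails or the
 * list would exceed REPLICAS_DELTA_LIST_MAX, we fall back to a fixed-size
 * mempool allocation and copy the old contents across.
 */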
461 int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
463 return allocate_dropping_locks_errcode(trans,
464 __replicas_deltas_realloc(trans, more, _gfp));
467 int bch2_update_replicas_list(struct btree_trans *trans,
468 struct bch_replicas_entry_v1 *r,
471 struct replicas_delta_list *d;
472 struct replicas_delta *n;
479 b = replicas_entry_bytes(r) + 8;
480 ret = bch2_replicas_deltas_realloc(trans, b);
484 d = trans->fs_usage_deltas;
485 n = (void *) d->d + d->used;
487 unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r),
488 r, replicas_entry_bytes(r),
489 "flexible array member embedded in strcuct with padding");
490 bch2_replicas_entry_sort(&n->r);
495 int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 sectors)
497 struct bch_replicas_padded r;
499 bch2_replicas_entry_cached(&r.e, dev);
501 return bch2_update_replicas_list(trans, &r.e, sectors);
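/*
 * Trigger for alloc keys: keeps the in-memory bucket state, device usage and
 * buckets_waiting_for_journal in sync with the alloc btree, and issues the
 * wakeups (freelist, discard, invalidate, gc_gens) that depend on a bucket's
 * new state.
 */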
504 int bch2_mark_alloc(struct btree_trans *trans,
505 enum btree_id btree, unsigned level,
506 struct bkey_s_c old, struct bkey_s_c new,
509 bool gc = flags & BTREE_TRIGGER_GC;
510 u64 journal_seq = trans->journal_res.seq;
511 u64 bucket_journal_seq;
512 struct bch_fs *c = trans->c;
513 struct bch_alloc_v4 old_a_convert, new_a_convert;
514 const struct bch_alloc_v4 *old_a, *new_a;
519 * alloc btree is read in by bch2_alloc_read, not gc:
521 if ((flags & BTREE_TRIGGER_GC) &&
522 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
525 if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
526 "alloc key for invalid device or bucket"))
529 ca = bch_dev_bkey_exists(c, new.k->p.inode);
531 old_a = bch2_alloc_to_v4(old, &old_a_convert);
532 new_a = bch2_alloc_to_v4(new, &new_a_convert);
534 bucket_journal_seq = new_a->journal_seq;
536 if ((flags & BTREE_TRIGGER_INSERT) &&
537 data_type_is_empty(old_a->data_type) !=
538 data_type_is_empty(new_a->data_type) &&
539 new.k->type == KEY_TYPE_alloc_v4) {
540 struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
542 EBUG_ON(!journal_seq);
545 * If the btree updates referring to a bucket weren't flushed
546 * before the bucket became empty again, then we don't have
547 * to wait on a journal flush before we can reuse the bucket:
549 v->journal_seq = bucket_journal_seq =
550 data_type_is_empty(new_a->data_type) &&
551 (journal_seq == v->journal_seq ||
552 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
556 if (!data_type_is_empty(old_a->data_type) &&
557 data_type_is_empty(new_a->data_type) &&
558 bucket_journal_seq) {
559 ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
560 c->journal.flushed_seq_ondisk,
561 new.k->p.inode, new.k->p.offset,
564 bch2_fs_fatal_error(c,
565 "error setting bucket_needs_journal_commit: %i", ret);
570 percpu_down_read(&c->mark_lock);
571 if (!gc && new_a->gen != old_a->gen)
572 *bucket_gen(ca, new.k->p.offset) = new_a->gen;
574 bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
577 struct bucket *g = gc_bucket(ca, new.k->p.offset);
583 g->data_type = new_a->data_type;
584 g->stripe = new_a->stripe;
585 g->stripe_redundancy = new_a->stripe_redundancy;
586 g->dirty_sectors = new_a->dirty_sectors;
587 g->cached_sectors = new_a->cached_sectors;
591 percpu_up_read(&c->mark_lock);
593 if (new_a->data_type == BCH_DATA_free &&
594 (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
595 closure_wake_up(&c->freelist_wait);
597 if (new_a->data_type == BCH_DATA_need_discard &&
598 (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
601 if (old_a->data_type != BCH_DATA_cached &&
602 new_a->data_type == BCH_DATA_cached &&
603 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
604 bch2_do_invalidates(c);
606 if (new_a->data_type == BCH_DATA_need_gc_gens)
612 int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
613 size_t b, enum bch_data_type data_type,
614 unsigned sectors, struct gc_pos pos,
617 struct bucket old, new, *g;
620 BUG_ON(!(flags & BTREE_TRIGGER_GC));
621 BUG_ON(data_type != BCH_DATA_sb &&
622 data_type != BCH_DATA_journal);
625 * Backup superblock might be past the end of our normal usable space:
627 if (b >= ca->mi.nbuckets)
630 percpu_down_read(&c->mark_lock);
631 g = gc_bucket(ca, b);
636 if (bch2_fs_inconsistent_on(g->data_type &&
637 g->data_type != data_type, c,
638 "different types of data in same bucket: %s, %s",
639 bch2_data_types[g->data_type],
640 bch2_data_types[data_type])) {
645 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
646 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
647 ca->dev_idx, b, g->gen,
648 bch2_data_types[g->data_type ?: data_type],
649 g->dirty_sectors, sectors)) {
654 g->data_type = data_type;
655 g->dirty_sectors += sectors;
660 bch2_dev_usage_update_m(c, ca, old, new);
661 percpu_up_read(&c->mark_lock);
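/*
 * Sanity checks shared by every bucket reference update: pointer gen newer
 * than bucket gen, pointer too stale, stale dirty pointers, conflicting data
 * types within one bucket, and u32 sector count overflow. Failures are
 * reported as fsck errors and dump the transaction's updates for debugging.
 */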
665 static int check_bucket_ref(struct btree_trans *trans,
667 const struct bch_extent_ptr *ptr,
668 s64 sectors, enum bch_data_type ptr_data_type,
669 u8 b_gen, u8 bucket_data_type,
672 struct bch_fs *c = trans->c;
673 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
674 size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
675 struct printbuf buf = PRINTBUF;
678 if (bucket_data_type == BCH_DATA_cached)
679 bucket_data_type = BCH_DATA_user;
681 if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
682 (bucket_data_type == BCH_DATA_user && ptr_data_type == BCH_DATA_stripe))
683 bucket_data_type = ptr_data_type = BCH_DATA_stripe;
685 if (gen_after(ptr->gen, b_gen)) {
686 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
687 BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen,
688 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
690 ptr->dev, bucket_nr, b_gen,
691 bch2_data_types[bucket_data_type ?: ptr_data_type],
693 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
698 if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
699 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
700 BCH_FSCK_ERR_ptr_too_stale,
701 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
703 ptr->dev, bucket_nr, b_gen,
704 bch2_data_types[bucket_data_type ?: ptr_data_type],
706 (printbuf_reset(&buf),
707 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
712 if (b_gen != ptr->gen && !ptr->cached) {
713 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
714 BCH_FSCK_ERR_stale_dirty_ptr,
715 "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
717 ptr->dev, bucket_nr, b_gen,
718 *bucket_gen(ca, bucket_nr),
719 bch2_data_types[bucket_data_type ?: ptr_data_type],
721 (printbuf_reset(&buf),
722 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
727 if (b_gen != ptr->gen) {
732 if (!data_type_is_empty(bucket_data_type) &&
734 bucket_data_type != ptr_data_type) {
735 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
736 BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
737 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
739 ptr->dev, bucket_nr, b_gen,
740 bch2_data_types[bucket_data_type],
741 bch2_data_types[ptr_data_type],
742 (printbuf_reset(&buf),
743 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
748 if ((u64) bucket_sectors + sectors > U32_MAX) {
749 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
750 BCH_FSCK_ERR_bucket_sector_count_overflow,
751 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
753 ptr->dev, bucket_nr, b_gen,
754 bch2_data_types[bucket_data_type ?: ptr_data_type],
755 bucket_sectors, sectors,
756 (printbuf_reset(&buf),
757 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
765 bch2_dump_trans_updates(trans);
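/*
 * GC-side marking of a single stripe block: parity blocks are charged the
 * full stripe sectors as BCH_DATA_parity, while data blocks are marked
 * BCH_DATA_stripe with zero sectors of their own.
 */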
769 static int mark_stripe_bucket(struct btree_trans *trans,
774 struct bch_fs *c = trans->c;
775 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
776 unsigned nr_data = s->nr_blocks - s->nr_redundant;
777 bool parity = ptr_idx >= nr_data;
778 enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
779 s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
780 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
781 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
782 struct bucket old, new, *g;
783 struct printbuf buf = PRINTBUF;
786 BUG_ON(!(flags & BTREE_TRIGGER_GC));
788 /* XXX doesn't handle deletion */
790 percpu_down_read(&c->mark_lock);
791 g = PTR_GC_BUCKET(ca, ptr);
793 if (g->dirty_sectors ||
794 (g->stripe && g->stripe != k.k->p.offset)) {
795 bch2_fs_inconsistent(c,
796 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
797 ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
798 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
806 ret = check_bucket_ref(trans, k, ptr, sectors, data_type,
807 g->gen, g->data_type,
812 g->data_type = data_type;
813 g->dirty_sectors += sectors;
815 g->stripe = k.k->p.offset;
816 g->stripe_redundancy = s->nr_redundant;
821 bch2_dev_usage_update_m(c, ca, old, new);
822 percpu_up_read(&c->mark_lock);
827 static int __mark_pointer(struct btree_trans *trans,
829 const struct bch_extent_ptr *ptr,
830 s64 sectors, enum bch_data_type ptr_data_type,
831 u8 bucket_gen, u8 *bucket_data_type,
832 u32 *dirty_sectors, u32 *cached_sectors)
834 u32 *dst_sectors = !ptr->cached
837 int ret = check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
838 bucket_gen, *bucket_data_type, *dst_sectors);
843 *dst_sectors += sectors;
845 if (!*dirty_sectors && !*cached_sectors)
846 *bucket_data_type = 0;
847 else if (*bucket_data_type != BCH_DATA_stripe)
848 *bucket_data_type = ptr_data_type;
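/*
 * __mark_pointer() adds the sectors to dirty_sectors or cached_sectors
 * depending on whether the pointer is cached, then rederives the bucket's
 * data type: an empty bucket drops back to type 0, and any non-stripe bucket
 * takes the pointer's data type.
 */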
853 static int bch2_mark_pointer(struct btree_trans *trans,
854 enum btree_id btree_id, unsigned level,
856 struct extent_ptr_decoded p,
860 struct bch_fs *c = trans->c;
861 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
862 struct bucket old, new, *g;
863 enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
867 BUG_ON(!(flags & BTREE_TRIGGER_GC));
869 percpu_down_read(&c->mark_lock);
870 g = PTR_GC_BUCKET(ca, &p.ptr);
874 bucket_data_type = g->data_type;
875 ret = __mark_pointer(trans, k, &p.ptr, sectors,
881 g->data_type = bucket_data_type;
886 bch2_dev_usage_update_m(c, ca, old, new);
887 percpu_up_read(&c->mark_lock);
892 static int bch2_mark_stripe_ptr(struct btree_trans *trans,
894 struct bch_extent_stripe_ptr p,
895 enum bch_data_type data_type,
899 struct bch_fs *c = trans->c;
900 struct bch_replicas_padded r;
903 BUG_ON(!(flags & BTREE_TRIGGER_GC));
905 m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
907 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
909 return -BCH_ERR_ENOMEM_mark_stripe_ptr;
912 mutex_lock(&c->ec_stripes_heap_lock);
914 if (!m || !m->alive) {
915 mutex_unlock(&c->ec_stripes_heap_lock);
916 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
918 bch2_inconsistent_error(c);
922 m->block_sectors[p.block] += sectors;
925 mutex_unlock(&c->ec_stripes_heap_lock);
927 r.e.data_type = data_type;
928 update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
933 static int __mark_extent(struct btree_trans *trans,
934 enum btree_id btree_id, unsigned level,
935 struct bkey_s_c k, unsigned flags)
937 u64 journal_seq = trans->journal_res.seq;
938 struct bch_fs *c = trans->c;
939 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
940 const union bch_extent_entry *entry;
941 struct extent_ptr_decoded p;
942 struct bch_replicas_padded r;
943 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
946 s64 sectors = bkey_is_btree_ptr(k.k)
949 s64 dirty_sectors = 0;
953 BUG_ON(!(flags & BTREE_TRIGGER_GC));
955 r.e.data_type = data_type;
959 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
960 s64 disk_sectors = ptr_disk_sectors(sectors, p);
962 if (flags & BTREE_TRIGGER_OVERWRITE)
963 disk_sectors = -disk_sectors;
965 ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
973 ret = update_cached_sectors(c, k, p.ptr.dev,
974 disk_sectors, journal_seq, true);
976 bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
981 } else if (!p.has_ec) {
982 dirty_sectors += disk_sectors;
983 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
985 ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
986 disk_sectors, flags);
991 * There may be other dirty pointers in this extent, but
992 * if so, they're not required for mounting if we have an
993 * erasure coded pointer in this extent:
1000 ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
1002 struct printbuf buf = PRINTBUF;
1004 bch2_bkey_val_to_text(&buf, c, k);
1005 bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
1006 printbuf_exit(&buf);
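/*
 * Extent triggers run overwrite-then-insert; a rough sketch, assuming the
 * mem_trigger_run_overwrite_then_insert() macro invokes the helper once per
 * key that is present:
 *
 *	ret =   fn(trans, btree_id, level, old, flags|BTREE_TRIGGER_OVERWRITE) ?:
 *		fn(trans, btree_id, level, new, flags|BTREE_TRIGGER_INSERT);
 */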
1014 int bch2_mark_extent(struct btree_trans *trans,
1015 enum btree_id btree_id, unsigned level,
1016 struct bkey_s_c old, struct bkey_s_c new,
1019 return mem_trigger_run_overwrite_then_insert(__mark_extent, trans, btree_id, level, old, new, flags);
1022 int bch2_mark_stripe(struct btree_trans *trans,
1023 enum btree_id btree_id, unsigned level,
1024 struct bkey_s_c old, struct bkey_s_c new,
1027 bool gc = flags & BTREE_TRIGGER_GC;
1028 u64 journal_seq = trans->journal_res.seq;
1029 struct bch_fs *c = trans->c;
1030 u64 idx = new.k->p.offset;
1031 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1032 ? bkey_s_c_to_stripe(old).v : NULL;
1033 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1034 ? bkey_s_c_to_stripe(new).v : NULL;
1038 BUG_ON(gc && old_s);
1041 struct stripe *m = genradix_ptr(&c->stripes, idx);
1044 struct printbuf buf1 = PRINTBUF;
1045 struct printbuf buf2 = PRINTBUF;
1047 bch2_bkey_val_to_text(&buf1, c, old);
1048 bch2_bkey_val_to_text(&buf2, c, new);
1049 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
1051 "new %s", idx, buf1.buf, buf2.buf);
1052 printbuf_exit(&buf2);
1053 printbuf_exit(&buf1);
1054 bch2_inconsistent_error(c);
1059 bch2_stripes_heap_del(c, m, idx);
1061 memset(m, 0, sizeof(*m));
1063 m->sectors = le16_to_cpu(new_s->sectors);
1064 m->algorithm = new_s->algorithm;
1065 m->nr_blocks = new_s->nr_blocks;
1066 m->nr_redundant = new_s->nr_redundant;
1067 m->blocks_nonempty = 0;
1069 for (i = 0; i < new_s->nr_blocks; i++)
1070 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
1073 bch2_stripes_heap_insert(c, m, idx);
1075 bch2_stripes_heap_update(c, m, idx);
1078 struct gc_stripe *m =
1079 genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
1082 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
1084 return -BCH_ERR_ENOMEM_mark_stripe;
1087 * This will be wrong when we bring back runtime gc: we should
1088 * be unmarking the old key and then marking the new key
1091 m->sectors = le16_to_cpu(new_s->sectors);
1092 m->nr_blocks = new_s->nr_blocks;
1093 m->nr_redundant = new_s->nr_redundant;
1095 for (i = 0; i < new_s->nr_blocks; i++)
1096 m->ptrs[i] = new_s->ptrs[i];
1098 bch2_bkey_to_replicas(&m->r.e, new);
1101 * gc recalculates this field from stripe ptr
1104 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1106 for (i = 0; i < new_s->nr_blocks; i++) {
1107 ret = mark_stripe_bucket(trans, new, i, flags);
1112 ret = update_replicas(c, new, &m->r.e,
1113 ((s64) m->sectors * m->nr_redundant),
1116 struct printbuf buf = PRINTBUF;
1118 bch2_bkey_val_to_text(&buf, c, new);
1119 bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
1120 printbuf_exit(&buf);
1128 static int __mark_reservation(struct btree_trans *trans,
1129 enum btree_id btree_id, unsigned level,
1130 struct bkey_s_c k, unsigned flags)
1132 struct bch_fs *c = trans->c;
1133 struct bch_fs_usage *fs_usage;
1134 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1135 s64 sectors = (s64) k.k->size;
1137 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1139 if (flags & BTREE_TRIGGER_OVERWRITE)
1141 sectors *= replicas;
1143 percpu_down_read(&c->mark_lock);
1146 fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
1147 replicas = clamp_t(unsigned, replicas, 1,
1148 ARRAY_SIZE(fs_usage->persistent_reserved));
1150 fs_usage->reserved += sectors;
1151 fs_usage->persistent_reserved[replicas - 1] += sectors;
1154 percpu_up_read(&c->mark_lock);
1159 int bch2_mark_reservation(struct btree_trans *trans,
1160 enum btree_id btree_id, unsigned level,
1161 struct bkey_s_c old, struct bkey_s_c new,
1164 return mem_trigger_run_overwrite_then_insert(__mark_reservation, trans, btree_id, level, old, new, flags);
1167 static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
1168 struct bkey_s_c_reflink_p p,
1170 u64 *idx, unsigned flags, size_t r_idx)
1172 struct bch_fs *c = trans->c;
1173 struct reflink_gc *r;
1174 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1177 struct printbuf buf = PRINTBUF;
1179 if (r_idx >= c->reflink_gc_nr)
1182 r = genradix_ptr(&c->reflink_gc_table, r_idx);
1183 next_idx = min(next_idx, r->offset - r->size);
1184 if (*idx < next_idx)
1187 BUG_ON((s64) r->refcount + add < 0);
1193 if (fsck_err(c, reflink_p_to_missing_reflink_v,
1194 "pointer to missing indirect extent\n"
1196 " missing range %llu-%llu",
1197 (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
1199 struct bkey_i_error *new;
1201 new = bch2_trans_kmalloc(trans, sizeof(*new));
1202 ret = PTR_ERR_OR_ZERO(new);
1207 new->k.type = KEY_TYPE_error;
1208 new->k.p = bkey_start_pos(p.k);
1209 new->k.p.offset += *idx - start;
1210 bch2_key_resize(&new->k, next_idx - *idx);
1211 ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i,
1212 BTREE_TRIGGER_NORUN);
1218 printbuf_exit(&buf);
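/*
 * reflink_gc_table entries are sorted by extent end offset (r->offset is the
 * end, r->offset - r->size the start), so the binary search below finds the
 * first entry that can cover idx, and marking then walks successive entries
 * until the whole pointer range has been counted.
 */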
1222 static int __mark_reflink_p(struct btree_trans *trans,
1223 enum btree_id btree_id, unsigned level,
1224 struct bkey_s_c k, unsigned flags)
1226 struct bch_fs *c = trans->c;
1227 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1228 struct reflink_gc *ref;
1230 u64 idx = le64_to_cpu(p.v->idx), start = idx;
1231 u64 end = le64_to_cpu(p.v->idx) + p.k->size;
1234 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1236 if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_reflink_p_fix) {
1237 idx -= le32_to_cpu(p.v->front_pad);
1238 end += le32_to_cpu(p.v->back_pad);
1242 r = c->reflink_gc_nr;
1244 m = l + (r - l) / 2;
1246 ref = genradix_ptr(&c->reflink_gc_table, m);
1247 if (ref->offset <= idx)
1253 while (idx < end && !ret)
1254 ret = __bch2_mark_reflink_p(trans, p, start, end,
1260 int bch2_mark_reflink_p(struct btree_trans *trans,
1261 enum btree_id btree_id, unsigned level,
1262 struct bkey_s_c old, struct bkey_s_c new,
1265 return mem_trigger_run_overwrite_then_insert(__mark_reflink_p, trans, btree_id, level, old, new, flags);
1268 void bch2_trans_fs_usage_revert(struct btree_trans *trans,
1269 struct replicas_delta_list *deltas)
1271 struct bch_fs *c = trans->c;
1272 struct bch_fs_usage *dst;
1273 struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
1277 percpu_down_read(&c->mark_lock);
1279 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1281 /* revert changes: */
1282 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1283 switch (d->r.data_type) {
1284 case BCH_DATA_btree:
1286 case BCH_DATA_parity:
1289 BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
1292 dst->nr_inodes -= deltas->nr_inodes;
1294 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1295 added -= deltas->persistent_reserved[i];
1296 dst->reserved -= deltas->persistent_reserved[i];
1297 dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
1301 trans->disk_res->sectors += added;
1302 this_cpu_add(*c->online_reserved, added);
1306 percpu_up_read(&c->mark_lock);
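/*
 * Applying deltas may only consume sectors the transaction actually
 * reserved. Worked example: if the deltas add 100 reserved sectors but
 * trans->disk_res holds only 80, should_not_have_added is 20, and those 20
 * sectors are clawed back from c->sectors_available (clamped at zero) before
 * the reservation is charged.
 */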
1309 int bch2_trans_fs_usage_apply(struct btree_trans *trans,
1310 struct replicas_delta_list *deltas)
1312 struct bch_fs *c = trans->c;
1313 static int warned_disk_usage = 0;
1315 u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1316 struct replicas_delta *d, *d2;
1317 struct replicas_delta *top = (void *) deltas->d + deltas->used;
1318 struct bch_fs_usage *dst;
1319 s64 added = 0, should_not_have_added;
1322 percpu_down_read(&c->mark_lock);
1324 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1326 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1327 switch (d->r.data_type) {
1328 case BCH_DATA_btree:
1330 case BCH_DATA_parity:
1334 if (__update_replicas(c, dst, &d->r, d->delta))
1338 dst->nr_inodes += deltas->nr_inodes;
1340 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1341 added += deltas->persistent_reserved[i];
1342 dst->reserved += deltas->persistent_reserved[i];
1343 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
1347 * Not allowed to reduce sectors_available except by getting a
1350 should_not_have_added = added - (s64) disk_res_sectors;
1351 if (unlikely(should_not_have_added > 0)) {
1352 u64 old, new, v = atomic64_read(&c->sectors_available);
1356 new = max_t(s64, 0, old - should_not_have_added);
1357 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1360 added -= should_not_have_added;
1365 trans->disk_res->sectors -= added;
1366 this_cpu_sub(*c->online_reserved, added);
1370 percpu_up_read(&c->mark_lock);
1372 if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1373 bch2_trans_inconsistent(trans,
1374 "disk usage increased %lli more than %llu sectors reserved)",
1375 should_not_have_added, disk_res_sectors);
1378 /* revert changes: */
1379 for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
1380 BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
1383 percpu_up_read(&c->mark_lock);
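/*
 * Transactional counterpart to bch2_mark_pointer(): updates the bucket's
 * alloc key through __mark_pointer() and, for dirty pointers, keeps the
 * matching backpointer entry in sync.
 */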
1389 static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
1390 enum btree_id btree_id, unsigned level,
1391 struct bkey_s_c k, struct extent_ptr_decoded p,
1394 bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
1395 struct btree_iter iter;
1396 struct bkey_i_alloc_v4 *a;
1398 struct bch_backpointer bp;
1402 bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
1403 sectors = bp.bucket_len;
1407 a = bch2_trans_start_alloc_update(trans, &iter, bucket);
1411 ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
1412 a->v.gen, &a->v.data_type,
1413 &a->v.dirty_sectors, &a->v.cached_sectors) ?:
1414 bch2_trans_update(trans, &iter, &a->k_i, 0);
1415 bch2_trans_iter_exit(trans, &iter);
1420 if (!p.ptr.cached) {
1421 ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
1429 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1430 struct extent_ptr_decoded p,
1431 s64 sectors, enum bch_data_type data_type)
1433 struct btree_iter iter;
1434 struct bkey_i_stripe *s;
1435 struct bch_replicas_padded r;
1438 s = bch2_bkey_get_mut_typed(trans, &iter,
1439 BTREE_ID_stripes, POS(0, p.ec.idx),
1440 BTREE_ITER_WITH_UPDATES, stripe);
1441 ret = PTR_ERR_OR_ZERO(s);
1442 if (unlikely(ret)) {
1443 bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
1444 "pointer to nonexistent stripe %llu",
1449 if (!bch2_ptr_matches_stripe(&s->v, p)) {
1450 bch2_trans_inconsistent(trans,
1451 "stripe pointer doesn't match stripe %llu",
1457 stripe_blockcount_set(&s->v, p.ec.block,
1458 stripe_blockcount_get(&s->v, p.ec.block) +
1461 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1462 r.e.data_type = data_type;
1463 ret = bch2_update_replicas_list(trans, &r.e, sectors);
1465 bch2_trans_iter_exit(trans, &iter);
1469 static int __trans_mark_extent(struct btree_trans *trans,
1470 enum btree_id btree_id, unsigned level,
1471 struct bkey_s_c k, unsigned flags)
1473 struct bch_fs *c = trans->c;
1474 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1475 const union bch_extent_entry *entry;
1476 struct extent_ptr_decoded p;
1477 struct bch_replicas_padded r;
1478 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
1481 s64 sectors = bkey_is_btree_ptr(k.k)
1484 s64 dirty_sectors = 0;
1488 r.e.data_type = data_type;
1490 r.e.nr_required = 1;
1492 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1493 s64 disk_sectors = ptr_disk_sectors(sectors, p);
1495 if (flags & BTREE_TRIGGER_OVERWRITE)
1496 disk_sectors = -disk_sectors;
1498 ret = bch2_trans_mark_pointer(trans, btree_id, level, k, p, flags);
1506 ret = bch2_update_cached_sectors_list(trans, p.ptr.dev,
1511 } else if (!p.has_ec) {
1512 dirty_sectors += disk_sectors;
1513 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1515 ret = bch2_trans_mark_stripe_ptr(trans, p,
1516 disk_sectors, data_type);
1520 r.e.nr_required = 0;
1525 ret = bch2_update_replicas_list(trans, &r.e, dirty_sectors);
1530 int bch2_trans_mark_extent(struct btree_trans *trans,
1531 enum btree_id btree_id, unsigned level,
1532 struct bkey_s_c old, struct bkey_i *new,
1535 struct bch_fs *c = trans->c;
1536 int mod = (int) bch2_bkey_needs_rebalance(c, bkey_i_to_s_c(new)) -
1537 (int) bch2_bkey_needs_rebalance(c, old);
1540 int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new->k.p, mod > 0);
1545 return trigger_run_overwrite_then_insert(__trans_mark_extent, trans, btree_id, level, old, new, flags);
1548 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
1549 struct bkey_s_c_stripe s,
1550 unsigned idx, bool deleting)
1552 struct bch_fs *c = trans->c;
1553 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1554 struct btree_iter iter;
1555 struct bkey_i_alloc_v4 *a;
1556 enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
1557 ? BCH_DATA_parity : 0;
1558 s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
1564 a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
1568 ret = check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
1569 a->v.gen, a->v.data_type,
1570 a->v.dirty_sectors);
1575 if (bch2_trans_inconsistent_on(a->v.stripe ||
1576 a->v.stripe_redundancy, trans,
1577 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
1578 iter.pos.inode, iter.pos.offset, a->v.gen,
1579 bch2_data_types[a->v.data_type],
1581 a->v.stripe, s.k->p.offset)) {
1586 if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
1587 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
1588 iter.pos.inode, iter.pos.offset, a->v.gen,
1589 bch2_data_types[a->v.data_type],
1596 a->v.stripe = s.k->p.offset;
1597 a->v.stripe_redundancy = s.v->nr_redundant;
1598 a->v.data_type = BCH_DATA_stripe;
1600 if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
1601 a->v.stripe_redundancy != s.v->nr_redundant, trans,
1602 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
1603 iter.pos.inode, iter.pos.offset, a->v.gen,
1604 s.k->p.offset, a->v.stripe)) {
1610 a->v.stripe_redundancy = 0;
1611 a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
1614 a->v.dirty_sectors += sectors;
1616 a->v.data_type = !deleting ? data_type : 0;
1618 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1622 bch2_trans_iter_exit(trans, &iter);
1626 int bch2_trans_mark_stripe(struct btree_trans *trans,
1627 enum btree_id btree_id, unsigned level,
1628 struct bkey_s_c old, struct bkey_i *new,
1631 const struct bch_stripe *old_s = NULL;
1632 struct bch_stripe *new_s = NULL;
1633 struct bch_replicas_padded r;
1634 unsigned i, nr_blocks;
1637 if (old.k->type == KEY_TYPE_stripe)
1638 old_s = bkey_s_c_to_stripe(old).v;
1639 if (new->k.type == KEY_TYPE_stripe)
1640 new_s = &bkey_i_to_stripe(new)->v;
1643 * If the pointers aren't changing, we don't need to do anything:
1645 if (new_s && old_s &&
1646 new_s->nr_blocks == old_s->nr_blocks &&
1647 new_s->nr_redundant == old_s->nr_redundant &&
1648 !memcmp(old_s->ptrs, new_s->ptrs,
1649 new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
1652 BUG_ON(new_s && old_s &&
1653 (new_s->nr_blocks != old_s->nr_blocks ||
1654 new_s->nr_redundant != old_s->nr_redundant));
1656 nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
1659 s64 sectors = le16_to_cpu(new_s->sectors);
1661 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
1662 ret = bch2_update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
1668 s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
1670 bch2_bkey_to_replicas(&r.e, old);
1671 ret = bch2_update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
1676 for (i = 0; i < nr_blocks; i++) {
1677 if (new_s && old_s &&
1678 !memcmp(&new_s->ptrs[i],
1680 sizeof(new_s->ptrs[i])))
1684 ret = bch2_trans_mark_stripe_bucket(trans,
1685 bkey_i_to_s_c_stripe(new), i, false);
1691 ret = bch2_trans_mark_stripe_bucket(trans,
1692 bkey_s_c_to_stripe(old), i, true);
1701 static int __trans_mark_reservation(struct btree_trans *trans,
1702 enum btree_id btree_id, unsigned level,
1703 struct bkey_s_c k, unsigned flags)
1705 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1706 s64 sectors = (s64) k.k->size;
1707 struct replicas_delta_list *d;
1710 if (flags & BTREE_TRIGGER_OVERWRITE)
1712 sectors *= replicas;
1714 ret = bch2_replicas_deltas_realloc(trans, 0);
1718 d = trans->fs_usage_deltas;
1719 replicas = clamp_t(unsigned, replicas, 1,
1720 ARRAY_SIZE(d->persistent_reserved));
1722 d->persistent_reserved[replicas - 1] += sectors;
1726 int bch2_trans_mark_reservation(struct btree_trans *trans,
1727 enum btree_id btree_id, unsigned level,
1728 struct bkey_s_c old,
1732 return trigger_run_overwrite_then_insert(__trans_mark_reservation, trans, btree_id, level, old, new, flags);
1735 static int trans_mark_reflink_p_segment(struct btree_trans *trans,
1736 struct bkey_s_c_reflink_p p,
1737 u64 *idx, unsigned flags)
1739 struct bch_fs *c = trans->c;
1740 struct btree_iter iter;
1743 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1744 struct printbuf buf = PRINTBUF;
1747 k = bch2_bkey_get_mut_noupdate(trans, &iter,
1748 BTREE_ID_reflink, POS(0, *idx),
1749 BTREE_ITER_WITH_UPDATES);
1750 ret = PTR_ERR_OR_ZERO(k);
1754 refcount = bkey_refcount(k);
1756 bch2_bkey_val_to_text(&buf, c, p.s_c);
1757 bch2_trans_inconsistent(trans,
1758 "nonexistent indirect extent at %llu while marking\n %s",
1764 if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
1765 bch2_bkey_val_to_text(&buf, c, p.s_c);
1766 bch2_trans_inconsistent(trans,
1767 "indirect extent refcount underflow at %llu while marking\n %s",
1773 if (flags & BTREE_TRIGGER_INSERT) {
1774 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1777 pad = max_t(s64, le32_to_cpu(v->front_pad),
1778 le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
1779 BUG_ON(pad > U32_MAX);
1780 v->front_pad = cpu_to_le32(pad);
1782 pad = max_t(s64, le32_to_cpu(v->back_pad),
1783 k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
1784 BUG_ON(pad > U32_MAX);
1785 v->back_pad = cpu_to_le32(pad);
1788 le64_add_cpu(refcount, add);
1790 bch2_btree_iter_set_pos_to_extent_start(&iter);
1791 ret = bch2_trans_update(trans, &iter, k, 0);
1795 *idx = k->k.p.offset;
1797 bch2_trans_iter_exit(trans, &iter);
1798 printbuf_exit(&buf);
1802 static int __trans_mark_reflink_p(struct btree_trans *trans,
1803 enum btree_id btree_id, unsigned level,
1804 struct bkey_s_c k, unsigned flags)
1806 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1810 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
1811 end_idx = le64_to_cpu(p.v->idx) + p.k->size +
1812 le32_to_cpu(p.v->back_pad);
1814 while (idx < end_idx && !ret)
1815 ret = trans_mark_reflink_p_segment(trans, p, &idx, flags);
1819 int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1820 enum btree_id btree_id, unsigned level,
1821 struct bkey_s_c old,
1825 if (flags & BTREE_TRIGGER_INSERT) {
1826 struct bch_reflink_p *v = &bkey_i_to_reflink_p(new)->v;
1828 v->front_pad = v->back_pad = 0;
1831 return trigger_run_overwrite_then_insert(__trans_mark_reflink_p, trans, btree_id, level, old, new, flags);
1834 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1835 struct bch_dev *ca, size_t b,
1836 enum bch_data_type type,
1839 struct bch_fs *c = trans->c;
1840 struct btree_iter iter;
1841 struct bkey_i_alloc_v4 *a;
1845 * Backup superblock might be past the end of our normal usable space:
1847 if (b >= ca->mi.nbuckets)
1850 a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
1854 if (a->v.data_type && type && a->v.data_type != type) {
1855 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1856 BCH_FSCK_ERR_bucket_metadata_type_mismatch,
1857 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
1859 iter.pos.inode, iter.pos.offset, a->v.gen,
1860 bch2_data_types[a->v.data_type],
1861 bch2_data_types[type],
1862 bch2_data_types[type]);
1867 if (a->v.data_type != type ||
1868 a->v.dirty_sectors != sectors) {
1869 a->v.data_type = type;
1870 a->v.dirty_sectors = sectors;
1871 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1874 bch2_trans_iter_exit(trans, &iter);
1878 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1879 struct bch_dev *ca, size_t b,
1880 enum bch_data_type type,
1883 return commit_do(trans, NULL, NULL, 0,
1884 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1887 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
1890 enum bch_data_type type,
1891 u64 *bucket, unsigned *bucket_sectors)
1894 u64 b = sector_to_bucket(ca, start);
1896 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1898 if (b != *bucket && *bucket_sectors) {
1899 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
1900 type, *bucket_sectors);
1904 *bucket_sectors = 0;
1908 *bucket_sectors += sectors;
1910 } while (start < end);
1915 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
1918 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1920 unsigned i, bucket_sectors = 0;
1923 for (i = 0; i < layout->nr_superblocks; i++) {
1924 u64 offset = le64_to_cpu(layout->sb_offset[i]);
1926 if (offset == BCH_SB_SECTOR) {
1927 ret = bch2_trans_mark_metadata_sectors(trans, ca,
1929 BCH_DATA_sb, &bucket, &bucket_sectors);
1934 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1935 offset + (1 << layout->sb_max_size_bits),
1936 BCH_DATA_sb, &bucket, &bucket_sectors);
1941 if (bucket_sectors) {
1942 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1943 bucket, BCH_DATA_sb, bucket_sectors);
1948 for (i = 0; i < ca->journal.nr; i++) {
1949 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1950 ca->journal.buckets[i],
1951 BCH_DATA_journal, ca->mi.bucket_size);
1959 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
1961 int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
1968 int bch2_trans_mark_dev_sbs(struct bch_fs *c)
1973 for_each_online_member(ca, c, i) {
1974 int ret = bch2_trans_mark_dev_sb(c, ca);
1976 percpu_ref_put(&ca->ref);
1984 /* Disk reservations: */
1986 #define SECTORS_CACHE 1024
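/*
 * Disk reservations are two-level: each CPU caches up to SECTORS_CACHE
 * sectors carved out of the shared atomic c->sectors_available, so the fast
 * path is a plain percpu subtraction. Only when both levels run dry do we
 * take sectors_available_lock and recompute from filesystem usage.
 */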
1988 int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1989 u64 sectors, int flags)
1991 struct bch_fs_pcpu *pcpu;
1993 s64 sectors_available;
1996 percpu_down_read(&c->mark_lock);
1998 pcpu = this_cpu_ptr(c->pcpu);
2000 if (sectors <= pcpu->sectors_available)
2003 v = atomic64_read(&c->sectors_available);
2006 get = min((u64) sectors + SECTORS_CACHE, old);
2008 if (get < sectors) {
2012 } while ((v = atomic64_cmpxchg(&c->sectors_available,
2013 old, old - get)) != old);
2015 pcpu->sectors_available += get;
2018 pcpu->sectors_available -= sectors;
2019 this_cpu_add(*c->online_reserved, sectors);
2020 res->sectors += sectors;
2023 percpu_up_read(&c->mark_lock);
2027 mutex_lock(&c->sectors_available_lock);
2029 percpu_u64_set(&c->pcpu->sectors_available, 0);
2030 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2032 if (sectors <= sectors_available ||
2033 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2034 atomic64_set(&c->sectors_available,
2035 max_t(s64, 0, sectors_available - sectors));
2036 this_cpu_add(*c->online_reserved, sectors);
2037 res->sectors += sectors;
2040 atomic64_set(&c->sectors_available, sectors_available);
2041 ret = -BCH_ERR_ENOSPC_disk_reservation;
2044 mutex_unlock(&c->sectors_available_lock);
2045 percpu_up_read(&c->mark_lock);
2050 /* Startup/shutdown: */
2052 static void bucket_gens_free_rcu(struct rcu_head *rcu)
2054 struct bucket_gens *buckets =
2055 container_of(rcu, struct bucket_gens, rcu);
2057 kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
2060 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2062 struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
2063 unsigned long *buckets_nouse = NULL;
2064 bool resize = ca->bucket_gens != NULL;
2067 if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
2068 GFP_KERNEL|__GFP_ZERO))) {
2069 ret = -BCH_ERR_ENOMEM_bucket_gens;
2073 if ((c->opts.buckets_nouse &&
2074 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2075 sizeof(unsigned long),
2076 GFP_KERNEL|__GFP_ZERO)))) {
2077 ret = -BCH_ERR_ENOMEM_buckets_nouse;
2081 bucket_gens->first_bucket = ca->mi.first_bucket;
2082 bucket_gens->nbuckets = nbuckets;
2085 down_write(&c->gc_lock);
2086 down_write(&ca->bucket_lock);
2087 percpu_down_write(&c->mark_lock);
2090 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
2093 size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
2095 memcpy(bucket_gens->b,
2099 memcpy(buckets_nouse,
2101 BITS_TO_LONGS(n) * sizeof(unsigned long));
2104 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
2105 bucket_gens = old_bucket_gens;
2107 swap(ca->buckets_nouse, buckets_nouse);
2109 nbuckets = ca->mi.nbuckets;
2112 percpu_up_write(&c->mark_lock);
2113 up_write(&ca->bucket_lock);
2114 up_write(&c->gc_lock);
2119 kvpfree(buckets_nouse,
2120 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2122 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
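/*
 * bucket_gens is RCU-protected: resize publishes the new array with
 * rcu_assign_pointer() while readers dereference it under RCU, and the old
 * array is freed via call_rcu() above once a grace period has elapsed.
 */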
2127 void bch2_dev_buckets_free(struct bch_dev *ca)
2131 kvpfree(ca->buckets_nouse,
2132 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2133 kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
2134 sizeof(struct bucket_gens) + ca->mi.nbuckets);
2136 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2137 free_percpu(ca->usage[i]);
2138 kfree(ca->usage_base);
2141 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2145 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2146 if (!ca->usage_base)
2147 return -BCH_ERR_ENOMEM_usage_init;
2149 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2150 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2152 return -BCH_ERR_ENOMEM_usage_init;
2155 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);