1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
9 #include "alloc_background.h"
10 #include "backpointers.h"
13 #include "btree_update.h"
15 #include "buckets_waiting_for_journal.h"
23 #include "subvolume.h"
26 #include <linux/preempt.h>
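/*
 * Route a sector delta to the matching top level counter in bch_fs_usage
 * (btree, data or cached), based on the data type being accounted.
 */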
28 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
29 enum bch_data_type data_type,
34 fs_usage->btree += sectors;
38 fs_usage->data += sectors;
41 fs_usage->cached += sectors;
48 void bch2_fs_usage_initialize(struct bch_fs *c)
50 percpu_down_write(&c->mark_lock);
51 struct bch_fs_usage *usage = c->usage_base;
53 for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
54 bch2_fs_usage_acc_to_base(c, i);
56 for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
57 usage->reserved += usage->persistent_reserved[i];
59 for (unsigned i = 0; i < c->replicas.nr; i++) {
60 struct bch_replicas_entry_v1 *e =
61 cpu_replicas_entry(&c->replicas, i);
63 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
66 for_each_member_device(c, ca) {
67 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
69 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
70 dev.d[BCH_DATA_journal].buckets) *
74 percpu_up_write(&c->mark_lock);
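/*
 * Return the percpu usage counters to update for this device: the gc copy
 * when called from gc, otherwise the set indexed by the journal buffer the
 * update will be written in (journal_seq & JOURNAL_BUF_MASK).
 */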
77 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
81 BUG_ON(!gc && !journal_seq);
83 return this_cpu_ptr(gc
85 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
88 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
90 struct bch_fs *c = ca->fs;
91 unsigned seq, i, u64s = dev_usage_u64s();
94 seq = read_seqcount_begin(&c->usage_lock);
95 memcpy(usage, ca->usage_base, u64s * sizeof(u64));
96 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
97 acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
98 } while (read_seqcount_retry(&c->usage_lock, seq));
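/*
 * Read a single counter out of the filesystem usage: the base copy plus the
 * not-yet-folded percpu accumulators, retried under the usage seqcount so we
 * see a consistent value.
 */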
101 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
103 ssize_t offset = v - (u64 *) c->usage_base;
107 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
108 percpu_rwsem_assert_held(&c->mark_lock);
111 seq = read_seqcount_begin(&c->usage_lock);
114 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
115 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
116 } while (read_seqcount_retry(&c->usage_lock, seq));
121 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
123 struct bch_fs_usage_online *ret;
124 unsigned nr_replicas = READ_ONCE(c->replicas.nr);
127 ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
131 percpu_down_read(&c->mark_lock);
133 if (nr_replicas != c->replicas.nr) {
134 nr_replicas = c->replicas.nr;
135 percpu_up_read(&c->mark_lock);
140 ret->online_reserved = percpu_u64_get(c->online_reserved);
143 seq = read_seqcount_begin(&c->usage_lock);
144 unsafe_memcpy(&ret->u, c->usage_base,
145 __fs_usage_u64s(nr_replicas) * sizeof(u64),
146 "embedded variable length struct");
147 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
148 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
149 __fs_usage_u64s(nr_replicas));
150 } while (read_seqcount_retry(&c->usage_lock, seq));
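/*
 * Fold one set of percpu usage accumulators (selected by journal buffer
 * index) into usage_base, for the filesystem and for every member device,
 * then zero the percpu copy - done under the write side of usage_lock so
 * readers retry instead of seeing a torn update.
 */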
155 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
157 unsigned u64s = fs_usage_u64s(c);
159 BUG_ON(idx >= ARRAY_SIZE(c->usage));
162 write_seqcount_begin(&c->usage_lock);
164 acc_u64s_percpu((u64 *) c->usage_base,
165 (u64 __percpu *) c->usage[idx], u64s);
166 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
169 for_each_member_device_rcu(c, ca, NULL) {
170 u64s = dev_usage_u64s();
172 acc_u64s_percpu((u64 *) ca->usage_base,
173 (u64 __percpu *) ca->usage[idx], u64s);
174 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
178 write_seqcount_end(&c->usage_lock);
182 void bch2_fs_usage_to_text(struct printbuf *out,
184 struct bch_fs_usage_online *fs_usage)
188 prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
190 prt_printf(out, "hidden:\t\t\t\t%llu\n",
192 prt_printf(out, "data:\t\t\t\t%llu\n",
194 prt_printf(out, "cached:\t\t\t\t%llu\n",
196 prt_printf(out, "reserved:\t\t\t%llu\n",
197 fs_usage->u.reserved);
198 prt_printf(out, "nr_inodes:\t\t\t%llu\n",
199 fs_usage->u.nr_inodes);
200 prt_printf(out, "online reserved:\t\t%llu\n",
201 fs_usage->online_reserved);
204 i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
206 prt_printf(out, "%u replicas:\n", i + 1);
207 prt_printf(out, "\treserved:\t\t%llu\n",
208 fs_usage->u.persistent_reserved[i]);
211 for (i = 0; i < c->replicas.nr; i++) {
212 struct bch_replicas_entry_v1 *e =
213 cpu_replicas_entry(&c->replicas, i);
215 prt_printf(out, "\t");
216 bch2_replicas_entry_to_text(out, e);
217 prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
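/*
 * reserve_factor() pads a reservation with a small amount of headroom,
 * roughly r / 2^RESERVE_FACTOR extra sectors. For example, assuming
 * RESERVE_FACTOR is 6, 1000 reserved sectors become
 * 1000 + (round_up(1000, 64) >> 6) = 1016.
 */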
221 static u64 reserve_factor(u64 r)
223 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
226 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
228 return min(fs_usage->u.hidden +
231 reserve_factor(fs_usage->u.reserved +
232 fs_usage->online_reserved),
236 static struct bch_fs_usage_short
237 __bch2_fs_usage_read_short(struct bch_fs *c)
239 struct bch_fs_usage_short ret;
242 ret.capacity = c->capacity -
243 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
245 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
246 bch2_fs_usage_read_one(c, &c->usage_base->btree);
247 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
248 percpu_u64_get(c->online_reserved);
250 ret.used = min(ret.capacity, data + reserve_factor(reserved));
251 ret.free = ret.capacity - ret.used;
253 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
258 struct bch_fs_usage_short
259 bch2_fs_usage_read_short(struct bch_fs *c)
261 struct bch_fs_usage_short ret;
263 percpu_down_read(&c->mark_lock);
264 ret = __bch2_fs_usage_read_short(c);
265 percpu_up_read(&c->mark_lock);
270 void bch2_dev_usage_init(struct bch_dev *ca)
272 ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
275 void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
278 prt_str(out, "buckets");
280 prt_str(out, "sectors");
282 prt_str(out, "fragmented");
286 for (unsigned i = 0; i < BCH_DATA_NR; i++) {
287 prt_str(out, bch2_data_types[i]);
289 prt_u64(out, usage->d[i].buckets);
291 prt_u64(out, usage->d[i].sectors);
293 prt_u64(out, usage->d[i].fragmented);
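/*
 * Apply the delta between an old and new alloc key to the device and
 * filesystem usage counters: bucket counts per data type, dirty/cached
 * sectors, fragmentation, and the "hidden" total for sb/journal buckets.
 */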
299 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
300 struct bch_alloc_v4 old,
301 struct bch_alloc_v4 new,
302 u64 journal_seq, bool gc)
304 struct bch_fs_usage *fs_usage;
305 struct bch_dev_usage *u;
308 fs_usage = fs_usage_ptr(c, journal_seq, gc);
310 if (data_type_is_hidden(old.data_type))
311 fs_usage->hidden -= ca->mi.bucket_size;
312 if (data_type_is_hidden(new.data_type))
313 fs_usage->hidden += ca->mi.bucket_size;
315 u = dev_usage_ptr(ca, journal_seq, gc);
317 u->d[old.data_type].buckets--;
318 u->d[new.data_type].buckets++;
320 u->d[old.data_type].sectors -= bch2_bucket_sectors_dirty(old);
321 u->d[new.data_type].sectors += bch2_bucket_sectors_dirty(new);
323 u->d[BCH_DATA_cached].sectors += new.cached_sectors;
324 u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
326 u->d[old.data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, old);
327 u->d[new.data_type].fragmented += bch2_bucket_sectors_fragmented(ca, new);
332 static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
334 return (struct bch_alloc_v4) {
336 .data_type = b.data_type,
337 .dirty_sectors = b.dirty_sectors,
338 .cached_sectors = b.cached_sectors,
343 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
344 struct bucket old, struct bucket new)
346 bch2_dev_usage_update(c, ca,
347 bucket_m_to_alloc(old),
348 bucket_m_to_alloc(new),
352 static inline int __update_replicas(struct bch_fs *c,
353 struct bch_fs_usage *fs_usage,
354 struct bch_replicas_entry_v1 *r,
357 int idx = bch2_replicas_entry_idx(c, r);
362 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
363 fs_usage->replicas[idx] += sectors;
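/*
 * Account sectors against a replicas entry. If the entry isn't in the
 * replicas table yet, that is reported as a fsck error and the entry is
 * added with bch2_mark_replicas() before retrying the lookup.
 */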
367 static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
368 struct bch_replicas_entry_v1 *r, s64 sectors,
369 unsigned journal_seq, bool gc)
371 struct bch_fs_usage *fs_usage;
373 struct printbuf buf = PRINTBUF;
375 percpu_down_read(&c->mark_lock);
377 idx = bch2_replicas_entry_idx(c, r);
379 fsck_err(c, ptr_to_missing_replicas_entry,
380 "no replicas entry\n while marking %s",
381 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
382 percpu_up_read(&c->mark_lock);
383 ret = bch2_mark_replicas(c, r);
384 percpu_down_read(&c->mark_lock);
388 idx = bch2_replicas_entry_idx(c, r);
396 fs_usage = fs_usage_ptr(c, journal_seq, gc);
397 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
398 fs_usage->replicas[idx] += sectors;
402 percpu_up_read(&c->mark_lock);
407 static inline int update_cached_sectors(struct bch_fs *c,
409 unsigned dev, s64 sectors,
410 unsigned journal_seq, bool gc)
412 struct bch_replicas_padded r;
414 bch2_replicas_entry_cached(&r.e, dev);
416 return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
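/*
 * Ensure the transaction's replicas_delta_list has room for @more bytes,
 * roughly doubling its size on reallocation. If the allocation fails we
 * fall back to a fixed-size (REPLICAS_DELTA_LIST_MAX) entry from the
 * replicas_delta mempool, copying the existing deltas over.
 */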
419 static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
422 struct replicas_delta_list *d = trans->fs_usage_deltas;
423 unsigned new_size = d ? (d->size + more) * 2 : 128;
424 unsigned alloc_size = sizeof(*d) + new_size;
426 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
428 if (!d || d->used + more > d->size) {
429 d = krealloc(d, alloc_size, gfp|__GFP_ZERO);
432 if (alloc_size > REPLICAS_DELTA_LIST_MAX)
435 d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
439 memset(d, 0, REPLICAS_DELTA_LIST_MAX);
441 if (trans->fs_usage_deltas)
442 memcpy(d, trans->fs_usage_deltas,
443 trans->fs_usage_deltas->size + sizeof(*d));
445 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
446 kfree(trans->fs_usage_deltas);
450 trans->fs_usage_deltas = d;
456 int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
458 return allocate_dropping_locks_errcode(trans,
459 __replicas_deltas_realloc(trans, more, _gfp));
462 int bch2_update_replicas_list(struct btree_trans *trans,
463 struct bch_replicas_entry_v1 *r,
466 struct replicas_delta_list *d;
467 struct replicas_delta *n;
474 b = replicas_entry_bytes(r) + 8;
475 ret = bch2_replicas_deltas_realloc(trans, b);
479 d = trans->fs_usage_deltas;
480 n = (void *) d->d + d->used;
482 unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r),
483 r, replicas_entry_bytes(r),
484 "flexible array member embedded in strcuct with padding");
485 bch2_replicas_entry_sort(&n->r);
490 int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 sectors)
492 struct bch_replicas_padded r;
494 bch2_replicas_entry_cached(&r.e, dev);
496 return bch2_update_replicas_list(trans, &r.e, sectors);
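/*
 * Atomic trigger for alloc key updates: keeps device usage and the in-memory
 * bucket gens (and, during gc, the gc bucket state) in sync with the new
 * alloc key, records when an emptied bucket must wait on a journal flush
 * before reuse, and wakes up the allocator/discard/invalidate paths when a
 * bucket becomes usable.
 */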
499 int bch2_mark_alloc(struct btree_trans *trans,
500 enum btree_id btree, unsigned level,
501 struct bkey_s_c old, struct bkey_s_c new,
504 bool gc = flags & BTREE_TRIGGER_GC;
505 u64 journal_seq = trans->journal_res.seq;
506 u64 bucket_journal_seq;
507 struct bch_fs *c = trans->c;
508 struct bch_alloc_v4 old_a_convert, new_a_convert;
509 const struct bch_alloc_v4 *old_a, *new_a;
514 * alloc btree is read in by bch2_alloc_read, not gc:
516 if ((flags & BTREE_TRIGGER_GC) &&
517 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
520 if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
521 "alloc key for invalid device or bucket"))
524 ca = bch_dev_bkey_exists(c, new.k->p.inode);
526 old_a = bch2_alloc_to_v4(old, &old_a_convert);
527 new_a = bch2_alloc_to_v4(new, &new_a_convert);
529 bucket_journal_seq = new_a->journal_seq;
531 if ((flags & BTREE_TRIGGER_INSERT) &&
532 data_type_is_empty(old_a->data_type) !=
533 data_type_is_empty(new_a->data_type) &&
534 new.k->type == KEY_TYPE_alloc_v4) {
535 struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
537 EBUG_ON(!journal_seq);
540 * If the btree updates referring to a bucket weren't flushed
541 before the bucket became empty again, then we don't have
542 * to wait on a journal flush before we can reuse the bucket:
544 v->journal_seq = bucket_journal_seq =
545 data_type_is_empty(new_a->data_type) &&
546 (journal_seq == v->journal_seq ||
547 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
551 if (!data_type_is_empty(old_a->data_type) &&
552 data_type_is_empty(new_a->data_type) &&
553 bucket_journal_seq) {
554 ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
555 c->journal.flushed_seq_ondisk,
556 new.k->p.inode, new.k->p.offset,
559 bch2_fs_fatal_error(c,
560 "error setting bucket_needs_journal_commit: %i", ret);
565 percpu_down_read(&c->mark_lock);
566 if (!gc && new_a->gen != old_a->gen)
567 *bucket_gen(ca, new.k->p.offset) = new_a->gen;
569 bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
572 struct bucket *g = gc_bucket(ca, new.k->p.offset);
578 g->data_type = new_a->data_type;
579 g->stripe = new_a->stripe;
580 g->stripe_redundancy = new_a->stripe_redundancy;
581 g->dirty_sectors = new_a->dirty_sectors;
582 g->cached_sectors = new_a->cached_sectors;
586 percpu_up_read(&c->mark_lock);
588 if (new_a->data_type == BCH_DATA_free &&
589 (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
590 closure_wake_up(&c->freelist_wait);
592 if (new_a->data_type == BCH_DATA_need_discard &&
593 (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
596 if (old_a->data_type != BCH_DATA_cached &&
597 new_a->data_type == BCH_DATA_cached &&
598 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
599 bch2_do_invalidates(c);
601 if (new_a->data_type == BCH_DATA_need_gc_gens)
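/*
 * Mark a superblock or journal bucket during gc - these buckets aren't
 * referenced by extent keys, so gc accounts for them directly here.
 */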
607 int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
608 size_t b, enum bch_data_type data_type,
609 unsigned sectors, struct gc_pos pos,
612 struct bucket old, new, *g;
615 BUG_ON(!(flags & BTREE_TRIGGER_GC));
616 BUG_ON(data_type != BCH_DATA_sb &&
617 data_type != BCH_DATA_journal);
620 * Backup superblock might be past the end of our normal usable space:
622 if (b >= ca->mi.nbuckets)
625 percpu_down_read(&c->mark_lock);
626 g = gc_bucket(ca, b);
631 if (bch2_fs_inconsistent_on(g->data_type &&
632 g->data_type != data_type, c,
633 "different types of data in same bucket: %s, %s",
634 bch2_data_types[g->data_type],
635 bch2_data_types[data_type])) {
640 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
641 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
642 ca->dev_idx, b, g->gen,
643 bch2_data_types[g->data_type ?: data_type],
644 g->dirty_sectors, sectors)) {
649 g->data_type = data_type;
650 g->dirty_sectors += sectors;
655 bch2_dev_usage_update_m(c, ca, old, new);
656 percpu_up_read(&c->mark_lock);
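/*
 * Validate an extent/stripe pointer against the bucket it points into:
 * checks for a pointer gen newer than the bucket gen, excessively stale
 * pointers, stale dirty pointers, mismatched data types and sector count
 * overflow, emitting fsck errors as appropriate.
 */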
660 static int check_bucket_ref(struct btree_trans *trans,
662 const struct bch_extent_ptr *ptr,
663 s64 sectors, enum bch_data_type ptr_data_type,
664 u8 b_gen, u8 bucket_data_type,
667 struct bch_fs *c = trans->c;
668 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
669 size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
670 struct printbuf buf = PRINTBUF;
673 if (bucket_data_type == BCH_DATA_cached)
674 bucket_data_type = BCH_DATA_user;
676 if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
677 (bucket_data_type == BCH_DATA_user && ptr_data_type == BCH_DATA_stripe))
678 bucket_data_type = ptr_data_type = BCH_DATA_stripe;
680 if (gen_after(ptr->gen, b_gen)) {
681 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
682 BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen,
683 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
685 ptr->dev, bucket_nr, b_gen,
686 bch2_data_types[bucket_data_type ?: ptr_data_type],
688 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
693 if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
694 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
695 BCH_FSCK_ERR_ptr_too_stale,
696 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
698 ptr->dev, bucket_nr, b_gen,
699 bch2_data_types[bucket_data_type ?: ptr_data_type],
701 (printbuf_reset(&buf),
702 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
707 if (b_gen != ptr->gen && !ptr->cached) {
708 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
709 BCH_FSCK_ERR_stale_dirty_ptr,
710 "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
712 ptr->dev, bucket_nr, b_gen,
713 *bucket_gen(ca, bucket_nr),
714 bch2_data_types[bucket_data_type ?: ptr_data_type],
716 (printbuf_reset(&buf),
717 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
722 if (b_gen != ptr->gen) {
727 if (!data_type_is_empty(bucket_data_type) &&
729 bucket_data_type != ptr_data_type) {
730 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
731 BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
732 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
734 ptr->dev, bucket_nr, b_gen,
735 bch2_data_types[bucket_data_type],
736 bch2_data_types[ptr_data_type],
737 (printbuf_reset(&buf),
738 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
743 if ((u64) bucket_sectors + sectors > U32_MAX) {
744 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
745 BCH_FSCK_ERR_bucket_sector_count_overflow,
746 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
748 ptr->dev, bucket_nr, b_gen,
749 bch2_data_types[bucket_data_type ?: ptr_data_type],
750 bucket_sectors, sectors,
751 (printbuf_reset(&buf),
752 bch2_bkey_val_to_text(&buf, c, k), buf.buf));
760 bch2_dump_trans_updates(trans);
764 static int mark_stripe_bucket(struct btree_trans *trans,
769 struct bch_fs *c = trans->c;
770 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
771 unsigned nr_data = s->nr_blocks - s->nr_redundant;
772 bool parity = ptr_idx >= nr_data;
773 enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
774 s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
775 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
776 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
777 struct bucket old, new, *g;
778 struct printbuf buf = PRINTBUF;
781 BUG_ON(!(flags & BTREE_TRIGGER_GC));
783 /* XXX: doesn't handle deletion */
785 percpu_down_read(&c->mark_lock);
786 g = PTR_GC_BUCKET(ca, ptr);
788 if (g->dirty_sectors ||
789 (g->stripe && g->stripe != k.k->p.offset)) {
790 bch2_fs_inconsistent(c,
791 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
792 ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
793 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
801 ret = check_bucket_ref(trans, k, ptr, sectors, data_type,
802 g->gen, g->data_type,
807 g->data_type = data_type;
808 g->dirty_sectors += sectors;
810 g->stripe = k.k->p.offset;
811 g->stripe_redundancy = s->nr_redundant;
816 bch2_dev_usage_update_m(c, ca, old, new);
817 percpu_up_read(&c->mark_lock);
822 static int __mark_pointer(struct btree_trans *trans,
824 const struct bch_extent_ptr *ptr,
825 s64 sectors, enum bch_data_type ptr_data_type,
826 u8 bucket_gen, u8 *bucket_data_type,
827 u32 *dirty_sectors, u32 *cached_sectors)
829 u32 *dst_sectors = !ptr->cached
832 int ret = check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
833 bucket_gen, *bucket_data_type, *dst_sectors);
838 *dst_sectors += sectors;
840 if (!*dirty_sectors && !*cached_sectors)
841 *bucket_data_type = 0;
842 else if (*bucket_data_type != BCH_DATA_stripe)
843 *bucket_data_type = ptr_data_type;
848 static int bch2_mark_pointer(struct btree_trans *trans,
849 enum btree_id btree_id, unsigned level,
851 struct extent_ptr_decoded p,
855 struct bch_fs *c = trans->c;
856 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
857 struct bucket old, new, *g;
858 enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
862 BUG_ON(!(flags & BTREE_TRIGGER_GC));
864 percpu_down_read(&c->mark_lock);
865 g = PTR_GC_BUCKET(ca, &p.ptr);
869 bucket_data_type = g->data_type;
870 ret = __mark_pointer(trans, k, &p.ptr, sectors,
876 g->data_type = bucket_data_type;
881 bch2_dev_usage_update_m(c, ca, old, new);
882 percpu_up_read(&c->mark_lock);
887 static int bch2_mark_stripe_ptr(struct btree_trans *trans,
889 struct bch_extent_stripe_ptr p,
890 enum bch_data_type data_type,
894 struct bch_fs *c = trans->c;
895 struct bch_replicas_padded r;
898 BUG_ON(!(flags & BTREE_TRIGGER_GC));
900 m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
902 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
904 return -BCH_ERR_ENOMEM_mark_stripe_ptr;
907 mutex_lock(&c->ec_stripes_heap_lock);
909 if (!m || !m->alive) {
910 mutex_unlock(&c->ec_stripes_heap_lock);
911 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
913 bch2_inconsistent_error(c);
917 m->block_sectors[p.block] += sectors;
920 mutex_unlock(&c->ec_stripes_heap_lock);
922 r.e.data_type = data_type;
923 update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
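/*
 * gc-path mark of an extent or btree pointer: walk each pointer, updating
 * the owning bucket, cached-sector accounting for cached pointers, stripe
 * block counts for erasure coded pointers, and finally the replicas entry
 * for the dirty pointers.
 */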
928 static int __mark_extent(struct btree_trans *trans,
929 enum btree_id btree_id, unsigned level,
930 struct bkey_s_c k, unsigned flags)
932 u64 journal_seq = trans->journal_res.seq;
933 struct bch_fs *c = trans->c;
934 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
935 const union bch_extent_entry *entry;
936 struct extent_ptr_decoded p;
937 struct bch_replicas_padded r;
938 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
941 s64 sectors = bkey_is_btree_ptr(k.k)
944 s64 dirty_sectors = 0;
948 BUG_ON(!(flags & BTREE_TRIGGER_GC));
950 r.e.data_type = data_type;
954 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
955 s64 disk_sectors = ptr_disk_sectors(sectors, p);
957 if (flags & BTREE_TRIGGER_OVERWRITE)
958 disk_sectors = -disk_sectors;
960 ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
968 ret = update_cached_sectors(c, k, p.ptr.dev,
969 disk_sectors, journal_seq, true);
971 bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
976 } else if (!p.has_ec) {
977 dirty_sectors += disk_sectors;
978 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
980 ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
981 disk_sectors, flags);
986 * There may be other dirty pointers in this extent, but
987 * if so they're not required for mounting if we have an
988 * erasure coded pointer in this extent:
995 ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
997 struct printbuf buf = PRINTBUF;
999 bch2_bkey_val_to_text(&buf, c, k);
1000 bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
1001 printbuf_exit(&buf);
1009 int bch2_mark_extent(struct btree_trans *trans,
1010 enum btree_id btree_id, unsigned level,
1011 struct bkey_s_c old, struct bkey_s_c new,
1014 return mem_trigger_run_overwrite_then_insert(__mark_extent, trans, btree_id, level, old, new, flags);
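/*
 * Atomic trigger for stripe keys: at runtime this keeps the in-memory
 * stripes radix tree and stripes heap in sync with the key; during gc it
 * rebuilds the gc_stripes state, marks the stripe's buckets and accounts
 * the parity sectors against the stripe's replicas entry.
 */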
1017 int bch2_mark_stripe(struct btree_trans *trans,
1018 enum btree_id btree_id, unsigned level,
1019 struct bkey_s_c old, struct bkey_s_c new,
1022 bool gc = flags & BTREE_TRIGGER_GC;
1023 u64 journal_seq = trans->journal_res.seq;
1024 struct bch_fs *c = trans->c;
1025 u64 idx = new.k->p.offset;
1026 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1027 ? bkey_s_c_to_stripe(old).v : NULL;
1028 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1029 ? bkey_s_c_to_stripe(new).v : NULL;
1033 BUG_ON(gc && old_s);
1036 struct stripe *m = genradix_ptr(&c->stripes, idx);
1039 struct printbuf buf1 = PRINTBUF;
1040 struct printbuf buf2 = PRINTBUF;
1042 bch2_bkey_val_to_text(&buf1, c, old);
1043 bch2_bkey_val_to_text(&buf2, c, new);
1044 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
1046 "new %s", idx, buf1.buf, buf2.buf);
1047 printbuf_exit(&buf2);
1048 printbuf_exit(&buf1);
1049 bch2_inconsistent_error(c);
1054 bch2_stripes_heap_del(c, m, idx);
1056 memset(m, 0, sizeof(*m));
1058 m->sectors = le16_to_cpu(new_s->sectors);
1059 m->algorithm = new_s->algorithm;
1060 m->nr_blocks = new_s->nr_blocks;
1061 m->nr_redundant = new_s->nr_redundant;
1062 m->blocks_nonempty = 0;
1064 for (i = 0; i < new_s->nr_blocks; i++)
1065 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
1068 bch2_stripes_heap_insert(c, m, idx);
1070 bch2_stripes_heap_update(c, m, idx);
1073 struct gc_stripe *m =
1074 genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
1077 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
1079 return -BCH_ERR_ENOMEM_mark_stripe;
1082 * This will be wrong when we bring back runtime gc: we should
1083 * be unmarking the old key and then marking the new key
1086 m->sectors = le16_to_cpu(new_s->sectors);
1087 m->nr_blocks = new_s->nr_blocks;
1088 m->nr_redundant = new_s->nr_redundant;
1090 for (i = 0; i < new_s->nr_blocks; i++)
1091 m->ptrs[i] = new_s->ptrs[i];
1093 bch2_bkey_to_replicas(&m->r.e, new);
1096 * gc recalculates this field from stripe ptr
1099 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1101 for (i = 0; i < new_s->nr_blocks; i++) {
1102 ret = mark_stripe_bucket(trans, new, i, flags);
1107 ret = update_replicas(c, new, &m->r.e,
1108 ((s64) m->sectors * m->nr_redundant),
1111 struct printbuf buf = PRINTBUF;
1113 bch2_bkey_val_to_text(&buf, c, new);
1114 bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
1115 printbuf_exit(&buf);
1123 static int __mark_reservation(struct btree_trans *trans,
1124 enum btree_id btree_id, unsigned level,
1125 struct bkey_s_c k, unsigned flags)
1127 struct bch_fs *c = trans->c;
1128 struct bch_fs_usage *fs_usage;
1129 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1130 s64 sectors = (s64) k.k->size;
1132 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1134 if (flags & BTREE_TRIGGER_OVERWRITE)
1136 sectors *= replicas;
1138 percpu_down_read(&c->mark_lock);
1141 fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
1142 replicas = clamp_t(unsigned, replicas, 1,
1143 ARRAY_SIZE(fs_usage->persistent_reserved));
1145 fs_usage->reserved += sectors;
1146 fs_usage->persistent_reserved[replicas - 1] += sectors;
1149 percpu_up_read(&c->mark_lock);
1154 int bch2_mark_reservation(struct btree_trans *trans,
1155 enum btree_id btree_id, unsigned level,
1156 struct bkey_s_c old, struct bkey_s_c new,
1159 return mem_trigger_run_overwrite_then_insert(__mark_reservation, trans, btree_id, level, old, new, flags);
1162 void bch2_trans_fs_usage_revert(struct btree_trans *trans,
1163 struct replicas_delta_list *deltas)
1165 struct bch_fs *c = trans->c;
1166 struct bch_fs_usage *dst;
1167 struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
1171 percpu_down_read(&c->mark_lock);
1173 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1175 /* revert changes: */
1176 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1177 switch (d->r.data_type) {
1178 case BCH_DATA_btree:
1180 case BCH_DATA_parity:
1183 BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
1186 dst->nr_inodes -= deltas->nr_inodes;
1188 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1189 added -= deltas->persistent_reserved[i];
1190 dst->reserved -= deltas->persistent_reserved[i];
1191 dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
1195 trans->disk_res->sectors += added;
1196 this_cpu_add(*c->online_reserved, added);
1200 percpu_up_read(&c->mark_lock);
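/*
 * Apply a transaction's accumulated replicas deltas to the filesystem usage
 * at commit time. If usage grew by more than the transaction's disk
 * reservation covered, the difference is clawed back from
 * c->sectors_available and an inconsistency is reported.
 */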
1203 int bch2_trans_fs_usage_apply(struct btree_trans *trans,
1204 struct replicas_delta_list *deltas)
1206 struct bch_fs *c = trans->c;
1207 static int warned_disk_usage = 0;
1209 u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1210 struct replicas_delta *d, *d2;
1211 struct replicas_delta *top = (void *) deltas->d + deltas->used;
1212 struct bch_fs_usage *dst;
1213 s64 added = 0, should_not_have_added;
1216 percpu_down_read(&c->mark_lock);
1218 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1220 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1221 switch (d->r.data_type) {
1222 case BCH_DATA_btree:
1224 case BCH_DATA_parity:
1228 if (__update_replicas(c, dst, &d->r, d->delta))
1232 dst->nr_inodes += deltas->nr_inodes;
1234 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1235 added += deltas->persistent_reserved[i];
1236 dst->reserved += deltas->persistent_reserved[i];
1237 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
1241 * Not allowed to reduce sectors_available except by getting a
1244 should_not_have_added = added - (s64) disk_res_sectors;
1245 if (unlikely(should_not_have_added > 0)) {
1246 u64 old, new, v = atomic64_read(&c->sectors_available);
1250 new = max_t(s64, 0, old - should_not_have_added);
1251 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1254 added -= should_not_have_added;
1259 trans->disk_res->sectors -= added;
1260 this_cpu_sub(*c->online_reserved, added);
1264 percpu_up_read(&c->mark_lock);
1266 if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1267 bch2_trans_inconsistent(trans,
1268 "disk usage increased %lli more than %llu sectors reserved)",
1269 should_not_have_added, disk_res_sectors);
1272 /* revert changes: */
1273 for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
1274 BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
1277 percpu_up_read(&c->mark_lock);
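/*
 * Transactional counterpart of the gc-path pointer marking: update the
 * pointer's alloc key (and backpointer, for dirty pointers) within the
 * btree transaction rather than touching in-memory buckets.
 */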
1283 static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
1284 enum btree_id btree_id, unsigned level,
1285 struct bkey_s_c k, struct extent_ptr_decoded p,
1288 bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
1289 struct btree_iter iter;
1290 struct bkey_i_alloc_v4 *a;
1292 struct bch_backpointer bp;
1296 bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
1297 sectors = bp.bucket_len;
1301 a = bch2_trans_start_alloc_update(trans, &iter, bucket);
1305 ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
1306 a->v.gen, &a->v.data_type,
1307 &a->v.dirty_sectors, &a->v.cached_sectors) ?:
1308 bch2_trans_update(trans, &iter, &a->k_i, 0);
1309 bch2_trans_iter_exit(trans, &iter);
1314 if (!p.ptr.cached) {
1315 ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
1323 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1324 struct extent_ptr_decoded p,
1325 s64 sectors, enum bch_data_type data_type)
1327 struct btree_iter iter;
1328 struct bkey_i_stripe *s;
1329 struct bch_replicas_padded r;
1332 s = bch2_bkey_get_mut_typed(trans, &iter,
1333 BTREE_ID_stripes, POS(0, p.ec.idx),
1334 BTREE_ITER_WITH_UPDATES, stripe);
1335 ret = PTR_ERR_OR_ZERO(s);
1336 if (unlikely(ret)) {
1337 bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
1338 "pointer to nonexistent stripe %llu",
1343 if (!bch2_ptr_matches_stripe(&s->v, p)) {
1344 bch2_trans_inconsistent(trans,
1345 "stripe pointer doesn't match stripe %llu",
1351 stripe_blockcount_set(&s->v, p.ec.block,
1352 stripe_blockcount_get(&s->v, p.ec.block) +
1355 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1356 r.e.data_type = data_type;
1357 ret = bch2_update_replicas_list(trans, &r.e, sectors);
1359 bch2_trans_iter_exit(trans, &iter);
1363 static int __trans_mark_extent(struct btree_trans *trans,
1364 enum btree_id btree_id, unsigned level,
1365 struct bkey_s_c k, unsigned flags)
1367 struct bch_fs *c = trans->c;
1368 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1369 const union bch_extent_entry *entry;
1370 struct extent_ptr_decoded p;
1371 struct bch_replicas_padded r;
1372 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
1375 s64 sectors = bkey_is_btree_ptr(k.k)
1378 s64 dirty_sectors = 0;
1382 r.e.data_type = data_type;
1384 r.e.nr_required = 1;
1386 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1387 s64 disk_sectors = ptr_disk_sectors(sectors, p);
1389 if (flags & BTREE_TRIGGER_OVERWRITE)
1390 disk_sectors = -disk_sectors;
1392 ret = bch2_trans_mark_pointer(trans, btree_id, level, k, p, flags);
1400 ret = bch2_update_cached_sectors_list(trans, p.ptr.dev,
1405 } else if (!p.has_ec) {
1406 dirty_sectors += disk_sectors;
1407 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1409 ret = bch2_trans_mark_stripe_ptr(trans, p,
1410 disk_sectors, data_type);
1414 r.e.nr_required = 0;
1419 ret = bch2_update_replicas_list(trans, &r.e, dirty_sectors);
1424 int bch2_trans_mark_extent(struct btree_trans *trans,
1425 enum btree_id btree_id, unsigned level,
1426 struct bkey_s_c old, struct bkey_i *new,
1429 struct bch_fs *c = trans->c;
1430 int mod = (int) bch2_bkey_needs_rebalance(c, bkey_i_to_s_c(new)) -
1431 (int) bch2_bkey_needs_rebalance(c, old);
1434 int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new->k.p, mod > 0);
1439 return trigger_run_overwrite_then_insert(__trans_mark_extent, trans, btree_id, level, old, new, flags);
1442 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
1443 struct bkey_s_c_stripe s,
1444 unsigned idx, bool deleting)
1446 struct bch_fs *c = trans->c;
1447 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1448 struct btree_iter iter;
1449 struct bkey_i_alloc_v4 *a;
1450 enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
1451 ? BCH_DATA_parity : 0;
1452 s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
1458 a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
1462 ret = check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
1463 a->v.gen, a->v.data_type,
1464 a->v.dirty_sectors);
1469 if (bch2_trans_inconsistent_on(a->v.stripe ||
1470 a->v.stripe_redundancy, trans,
1471 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
1472 iter.pos.inode, iter.pos.offset, a->v.gen,
1473 bch2_data_types[a->v.data_type],
1475 a->v.stripe, s.k->p.offset)) {
1480 if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
1481 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
1482 iter.pos.inode, iter.pos.offset, a->v.gen,
1483 bch2_data_types[a->v.data_type],
1490 a->v.stripe = s.k->p.offset;
1491 a->v.stripe_redundancy = s.v->nr_redundant;
1492 a->v.data_type = BCH_DATA_stripe;
1494 if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
1495 a->v.stripe_redundancy != s.v->nr_redundant, trans,
1496 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
1497 iter.pos.inode, iter.pos.offset, a->v.gen,
1498 s.k->p.offset, a->v.stripe)) {
1504 a->v.stripe_redundancy = 0;
1505 a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
1508 a->v.dirty_sectors += sectors;
1510 a->v.data_type = !deleting ? data_type : 0;
1512 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1516 bch2_trans_iter_exit(trans, &iter);
1520 int bch2_trans_mark_stripe(struct btree_trans *trans,
1521 enum btree_id btree_id, unsigned level,
1522 struct bkey_s_c old, struct bkey_i *new,
1525 const struct bch_stripe *old_s = NULL;
1526 struct bch_stripe *new_s = NULL;
1527 struct bch_replicas_padded r;
1528 unsigned i, nr_blocks;
1531 if (old.k->type == KEY_TYPE_stripe)
1532 old_s = bkey_s_c_to_stripe(old).v;
1533 if (new->k.type == KEY_TYPE_stripe)
1534 new_s = &bkey_i_to_stripe(new)->v;
1537 * If the pointers aren't changing, we don't need to do anything:
1539 if (new_s && old_s &&
1540 new_s->nr_blocks == old_s->nr_blocks &&
1541 new_s->nr_redundant == old_s->nr_redundant &&
1542 !memcmp(old_s->ptrs, new_s->ptrs,
1543 new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
1546 BUG_ON(new_s && old_s &&
1547 (new_s->nr_blocks != old_s->nr_blocks ||
1548 new_s->nr_redundant != old_s->nr_redundant));
1550 nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
1553 s64 sectors = le16_to_cpu(new_s->sectors);
1555 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
1556 ret = bch2_update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
1562 s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
1564 bch2_bkey_to_replicas(&r.e, old);
1565 ret = bch2_update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
1570 for (i = 0; i < nr_blocks; i++) {
1571 if (new_s && old_s &&
1572 !memcmp(&new_s->ptrs[i],
1574 sizeof(new_s->ptrs[i])))
1578 ret = bch2_trans_mark_stripe_bucket(trans,
1579 bkey_i_to_s_c_stripe(new), i, false);
1585 ret = bch2_trans_mark_stripe_bucket(trans,
1586 bkey_s_c_to_stripe(old), i, true);
1595 static int __trans_mark_reservation(struct btree_trans *trans,
1596 enum btree_id btree_id, unsigned level,
1597 struct bkey_s_c k, unsigned flags)
1599 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1600 s64 sectors = (s64) k.k->size;
1601 struct replicas_delta_list *d;
1604 if (flags & BTREE_TRIGGER_OVERWRITE)
1606 sectors *= replicas;
1608 ret = bch2_replicas_deltas_realloc(trans, 0);
1612 d = trans->fs_usage_deltas;
1613 replicas = clamp_t(unsigned, replicas, 1,
1614 ARRAY_SIZE(d->persistent_reserved));
1616 d->persistent_reserved[replicas - 1] += sectors;
1620 int bch2_trans_mark_reservation(struct btree_trans *trans,
1621 enum btree_id btree_id, unsigned level,
1622 struct bkey_s_c old,
1626 return trigger_run_overwrite_then_insert(__trans_mark_reservation, trans, btree_id, level, old, new, flags);
1629 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1630 struct bch_dev *ca, size_t b,
1631 enum bch_data_type type,
1634 struct bch_fs *c = trans->c;
1635 struct btree_iter iter;
1636 struct bkey_i_alloc_v4 *a;
1640 * Backup superblock might be past the end of our normal usable space:
1642 if (b >= ca->mi.nbuckets)
1645 a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
1649 if (a->v.data_type && type && a->v.data_type != type) {
1650 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1651 BCH_FSCK_ERR_bucket_metadata_type_mismatch,
1652 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
1654 iter.pos.inode, iter.pos.offset, a->v.gen,
1655 bch2_data_types[a->v.data_type],
1656 bch2_data_types[type],
1657 bch2_data_types[type]);
1662 if (a->v.data_type != type ||
1663 a->v.dirty_sectors != sectors) {
1664 a->v.data_type = type;
1665 a->v.dirty_sectors = sectors;
1666 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1669 bch2_trans_iter_exit(trans, &iter);
1673 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1674 struct bch_dev *ca, size_t b,
1675 enum bch_data_type type,
1678 return commit_do(trans, NULL, NULL, 0,
1679 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1682 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
1685 enum bch_data_type type,
1686 u64 *bucket, unsigned *bucket_sectors)
1689 u64 b = sector_to_bucket(ca, start);
1691 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1693 if (b != *bucket && *bucket_sectors) {
1694 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
1695 type, *bucket_sectors);
1699 *bucket_sectors = 0;
1703 *bucket_sectors += sectors;
1705 } while (start < end);
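/*
 * Mark every bucket covered by a device's superblock layout and journal as
 * BCH_DATA_sb / BCH_DATA_journal in the alloc btree.
 */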
1710 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
1713 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1715 unsigned i, bucket_sectors = 0;
1718 for (i = 0; i < layout->nr_superblocks; i++) {
1719 u64 offset = le64_to_cpu(layout->sb_offset[i]);
1721 if (offset == BCH_SB_SECTOR) {
1722 ret = bch2_trans_mark_metadata_sectors(trans, ca,
1724 BCH_DATA_sb, &bucket, &bucket_sectors);
1729 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1730 offset + (1 << layout->sb_max_size_bits),
1731 BCH_DATA_sb, &bucket, &bucket_sectors);
1736 if (bucket_sectors) {
1737 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1738 bucket, BCH_DATA_sb, bucket_sectors);
1743 for (i = 0; i < ca->journal.nr; i++) {
1744 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1745 ca->journal.buckets[i],
1746 BCH_DATA_journal, ca->mi.bucket_size);
1754 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
1756 int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
1762 int bch2_trans_mark_dev_sbs(struct bch_fs *c)
1764 for_each_online_member(c, ca) {
1765 int ret = bch2_trans_mark_dev_sb(c, ca);
1767 percpu_ref_put(&ca->ref);
1775 /* Disk reservations: */
1777 #define SECTORS_CACHE 1024
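/*
 * Slow path for disk reservations: refill this cpu's sectors_available
 * cache from the global atomic counter in SECTORS_CACHE-sized chunks; if
 * the global counter is exhausted, recompute it from filesystem usage under
 * sectors_available_lock and fail with -BCH_ERR_ENOSPC_disk_reservation
 * unless BCH_DISK_RESERVATION_NOFAIL is set.
 */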
1779 int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1780 u64 sectors, int flags)
1782 struct bch_fs_pcpu *pcpu;
1784 s64 sectors_available;
1787 percpu_down_read(&c->mark_lock);
1789 pcpu = this_cpu_ptr(c->pcpu);
1791 if (sectors <= pcpu->sectors_available)
1794 v = atomic64_read(&c->sectors_available);
1797 get = min((u64) sectors + SECTORS_CACHE, old);
1799 if (get < sectors) {
1803 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1804 old, old - get)) != old);
1806 pcpu->sectors_available += get;
1809 pcpu->sectors_available -= sectors;
1810 this_cpu_add(*c->online_reserved, sectors);
1811 res->sectors += sectors;
1814 percpu_up_read(&c->mark_lock);
1818 mutex_lock(&c->sectors_available_lock);
1820 percpu_u64_set(&c->pcpu->sectors_available, 0);
1821 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
1823 if (sectors <= sectors_available ||
1824 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1825 atomic64_set(&c->sectors_available,
1826 max_t(s64, 0, sectors_available - sectors));
1827 this_cpu_add(*c->online_reserved, sectors);
1828 res->sectors += sectors;
1831 atomic64_set(&c->sectors_available, sectors_available);
1832 ret = -BCH_ERR_ENOSPC_disk_reservation;
1835 mutex_unlock(&c->sectors_available_lock);
1836 percpu_up_read(&c->mark_lock);
1841 /* Startup/shutdown: */
1843 static void bucket_gens_free_rcu(struct rcu_head *rcu)
1845 struct bucket_gens *buckets =
1846 container_of(rcu, struct bucket_gens, rcu);
1848 kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
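/*
 * Resize a device's bucket_gens array (and buckets_nouse bitmap, if enabled)
 * when the device grows or shrinks: allocate the new arrays, copy the old
 * contents under gc_lock/bucket_lock/mark_lock, publish with
 * rcu_assign_pointer() and free the old array via call_rcu().
 */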
1851 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1853 struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
1854 unsigned long *buckets_nouse = NULL;
1855 bool resize = ca->bucket_gens != NULL;
1858 if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
1859 GFP_KERNEL|__GFP_ZERO))) {
1860 ret = -BCH_ERR_ENOMEM_bucket_gens;
1864 if ((c->opts.buckets_nouse &&
1865 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1866 sizeof(unsigned long),
1867 GFP_KERNEL|__GFP_ZERO)))) {
1868 ret = -BCH_ERR_ENOMEM_buckets_nouse;
1872 bucket_gens->first_bucket = ca->mi.first_bucket;
1873 bucket_gens->nbuckets = nbuckets;
1876 down_write(&c->gc_lock);
1877 down_write(&ca->bucket_lock);
1878 percpu_down_write(&c->mark_lock);
1881 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
1884 size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
1886 memcpy(bucket_gens->b,
1890 memcpy(buckets_nouse,
1892 BITS_TO_LONGS(n) * sizeof(unsigned long));
1895 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
1896 bucket_gens = old_bucket_gens;
1898 swap(ca->buckets_nouse, buckets_nouse);
1900 nbuckets = ca->mi.nbuckets;
1903 percpu_up_write(&c->mark_lock);
1904 up_write(&ca->bucket_lock);
1905 up_write(&c->gc_lock);
1910 kvpfree(buckets_nouse,
1911 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1913 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
1918 void bch2_dev_buckets_free(struct bch_dev *ca)
1922 kvpfree(ca->buckets_nouse,
1923 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
1924 kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
1925 sizeof(struct bucket_gens) + ca->mi.nbuckets);
1927 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
1928 free_percpu(ca->usage[i]);
1929 kfree(ca->usage_base);
1932 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
1936 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
1937 if (!ca->usage_base)
1938 return -BCH_ERR_ENOMEM_usage_init;
1940 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
1941 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
1943 return -BCH_ERR_ENOMEM_usage_init;
1946 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);