1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
9 #include "alloc_background.h"
12 #include "btree_update.h"
21 #include "subvolume.h"
23 #include <linux/preempt.h>
24 #include <trace/events/bcachefs.h>
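/*
 * Route a sector delta into the summary counter (btree, data or cached) that
 * corresponds to @data_type:
 */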
26 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
27 enum bch_data_type data_type,
32 fs_usage->btree += sectors;
36 fs_usage->data += sectors;
39 fs_usage->cached += sectors;
47 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
50 void bch2_bucket_seq_cleanup(struct bch_fs *c)
52 u64 journal_seq = atomic64_read(&c->journal.seq);
53 u16 last_seq_ondisk = c->journal.flushed_seq_ondisk;
55 struct bucket_array *buckets;
60 if (journal_seq - c->last_bucket_seq_cleanup <
61 (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
64 c->last_bucket_seq_cleanup = journal_seq;
66 for_each_member_device(ca, c, i) {
67 down_read(&ca->bucket_lock);
68 buckets = bucket_array(ca);
70 for_each_bucket(g, buckets) {
71 bucket_cmpxchg(g, m, ({
72 if (!m.journal_seq_valid ||
73 bucket_needs_journal_commit(m, last_seq_ondisk))
76 m.journal_seq_valid = 0;
79 up_read(&ca->bucket_lock);
83 void bch2_fs_usage_initialize(struct bch_fs *c)
85 struct bch_fs_usage *usage;
89 percpu_down_write(&c->mark_lock);
90 usage = c->usage_base;
92 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
93 bch2_fs_usage_acc_to_base(c, i);
95 for (i = 0; i < BCH_REPLICAS_MAX; i++)
96 usage->reserved += usage->persistent_reserved[i];
98 for (i = 0; i < c->replicas.nr; i++) {
99 struct bch_replicas_entry *e =
100 cpu_replicas_entry(&c->replicas, i);
102 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
105 for_each_member_device(ca, c, i) {
106 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
108 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
109 dev.d[BCH_DATA_journal].buckets) *
113 percpu_up_write(&c->mark_lock);
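/*
 * Per-device usage counters to update: the gc copy when @gc, otherwise the
 * set belonging to the journal buffer that will commit this update:
 */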
116 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
117 unsigned journal_seq,
120 BUG_ON(!gc && !journal_seq);
122 return this_cpu_ptr(gc
124 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
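/* Read a consistent snapshot of a device's usage: base counters plus all per-cpu deltas: */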
127 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
129 struct bch_fs *c = ca->fs;
130 struct bch_dev_usage ret;
131 unsigned seq, i, u64s = dev_usage_u64s();
134 seq = read_seqcount_begin(&c->usage_lock);
135 memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
136 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
137 acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
138 } while (read_seqcount_retry(&c->usage_lock, seq));
143 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
144 unsigned journal_seq,
147 percpu_rwsem_assert_held(&c->mark_lock);
148 BUG_ON(!gc && !journal_seq);
150 return this_cpu_ptr(gc
152 : c->usage[journal_seq & JOURNAL_BUF_MASK]);
155 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
157 ssize_t offset = v - (u64 *) c->usage_base;
161 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
162 percpu_rwsem_assert_held(&c->mark_lock);
165 seq = read_seqcount_begin(&c->usage_lock);
168 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
169 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
170 } while (read_seqcount_retry(&c->usage_lock, seq));
175 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
177 struct bch_fs_usage_online *ret;
178 unsigned seq, i, u64s;
180 percpu_down_read(&c->mark_lock);
182 ret = kmalloc(sizeof(struct bch_fs_usage_online) +
183 sizeof(u64) * c->replicas.nr, GFP_NOFS);
184 if (unlikely(!ret)) {
185 percpu_up_read(&c->mark_lock);
189 ret->online_reserved = percpu_u64_get(c->online_reserved);
191 u64s = fs_usage_u64s(c);
193 seq = read_seqcount_begin(&c->usage_lock);
194 memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
195 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
196 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
197 } while (read_seqcount_retry(&c->usage_lock, seq));
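/*
 * Fold one set of per-cpu usage counters (c->usage[idx] and each device's
 * ca->usage[idx]) into the base counters and zero it, under the usage
 * seqcount write side:
 */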
202 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
205 unsigned i, u64s = fs_usage_u64s(c);
207 BUG_ON(idx >= ARRAY_SIZE(c->usage));
210 write_seqcount_begin(&c->usage_lock);
212 acc_u64s_percpu((u64 *) c->usage_base,
213 (u64 __percpu *) c->usage[idx], u64s);
214 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
217 for_each_member_device_rcu(ca, c, i, NULL) {
218 u64s = dev_usage_u64s();
220 acc_u64s_percpu((u64 *) ca->usage_base,
221 (u64 __percpu *) ca->usage[idx], u64s);
222 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
226 write_seqcount_end(&c->usage_lock);
230 void bch2_fs_usage_to_text(struct printbuf *out,
232 struct bch_fs_usage_online *fs_usage)
236 pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
238 pr_buf(out, "hidden:\t\t\t\t%llu\n",
240 pr_buf(out, "data:\t\t\t\t%llu\n",
242 pr_buf(out, "cached:\t\t\t\t%llu\n",
244 pr_buf(out, "reserved:\t\t\t%llu\n",
245 fs_usage->u.reserved);
246 pr_buf(out, "nr_inodes:\t\t\t%llu\n",
247 fs_usage->u.nr_inodes);
248 pr_buf(out, "online reserved:\t\t%llu\n",
249 fs_usage->online_reserved);
252 i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
254 pr_buf(out, "%u replicas:\n", i + 1);
255 pr_buf(out, "\treserved:\t\t%llu\n",
256 fs_usage->u.persistent_reserved[i]);
259 for (i = 0; i < c->replicas.nr; i++) {
260 struct bch_replicas_entry *e =
261 cpu_replicas_entry(&c->replicas, i);
264 bch2_replicas_entry_to_text(out, e);
265 pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
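/* Pad @r by roughly 1 / 2^RESERVE_FACTOR, a safety margin applied to reserved sectors: */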
269 static u64 reserve_factor(u64 r)
271 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
274 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
276 return min(fs_usage->u.hidden +
279 reserve_factor(fs_usage->u.reserved +
280 fs_usage->online_reserved),
284 static struct bch_fs_usage_short
285 __bch2_fs_usage_read_short(struct bch_fs *c)
287 struct bch_fs_usage_short ret;
290 ret.capacity = c->capacity -
291 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
293 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
294 bch2_fs_usage_read_one(c, &c->usage_base->btree);
295 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
296 percpu_u64_get(c->online_reserved);
298 ret.used = min(ret.capacity, data + reserve_factor(reserved));
299 ret.free = ret.capacity - ret.used;
301 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
306 struct bch_fs_usage_short
307 bch2_fs_usage_read_short(struct bch_fs *c)
309 struct bch_fs_usage_short ret;
311 percpu_down_read(&c->mark_lock);
312 ret = __bch2_fs_usage_read_short(c);
313 percpu_up_read(&c->mark_lock);
318 static inline int is_unavailable_bucket(struct bucket_mark m)
320 return !is_available_bucket(m);
323 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
324 struct bucket_mark m)
326 return bucket_sectors_used(m)
327 ? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
331 static inline int is_stripe_data_bucket(struct bucket_mark m)
333 return m.stripe && m.data_type != BCH_DATA_parity;
336 static inline enum bch_data_type bucket_type(struct bucket_mark m)
338 return m.cached_sectors && !m.dirty_sectors
343 static inline void account_bucket(struct bch_fs_usage *fs_usage,
344 struct bch_dev_usage *dev_usage,
345 enum bch_data_type type,
348 if (type == BCH_DATA_sb || type == BCH_DATA_journal)
349 fs_usage->hidden += size;
351 dev_usage->d[type].buckets += nr;
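/*
 * Apply the delta between an old and new bucket mark to the owning device's
 * usage counters (and the filesystem 'hidden' total for sb/journal buckets),
 * waking the allocator if the bucket just became available:
 */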
354 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
355 struct bucket_mark old, struct bucket_mark new,
356 u64 journal_seq, bool gc)
358 struct bch_fs_usage *fs_usage;
359 struct bch_dev_usage *u;
362 fs_usage = fs_usage_ptr(c, journal_seq, gc);
363 u = dev_usage_ptr(ca, journal_seq, gc);
365 if (bucket_type(old))
366 account_bucket(fs_usage, u, bucket_type(old),
367 -1, -ca->mi.bucket_size);
369 if (bucket_type(new))
370 account_bucket(fs_usage, u, bucket_type(new),
371 1, ca->mi.bucket_size);
373 u->buckets_ec += (int) new.stripe - (int) old.stripe;
374 u->buckets_unavailable +=
375 is_unavailable_bucket(new) - is_unavailable_bucket(old);
377 u->d[old.data_type].sectors -= old.dirty_sectors;
378 u->d[new.data_type].sectors += new.dirty_sectors;
379 u->d[BCH_DATA_cached].sectors +=
380 (int) new.cached_sectors - (int) old.cached_sectors;
382 u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
383 u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
387 if (!is_available_bucket(old) && is_available_bucket(new))
388 bch2_wake_allocator(ca);
391 static inline int __update_replicas(struct bch_fs *c,
392 struct bch_fs_usage *fs_usage,
393 struct bch_replicas_entry *r,
396 int idx = bch2_replicas_entry_idx(c, r);
401 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
402 fs_usage->replicas[idx] += sectors;
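/*
 * Account @sectors to the replicas entry @r; a missing entry is an fsck
 * error, in which case we add it with bch2_mark_replicas() and retry:
 */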
406 static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
407 struct bch_replicas_entry *r, s64 sectors,
408 unsigned journal_seq, bool gc)
410 struct bch_fs_usage __percpu *fs_usage;
414 percpu_down_read(&c->mark_lock);
416 idx = bch2_replicas_entry_idx(c, r);
418 (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
419 fsck_err(c, "no replicas entry\n"
421 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))) {
422 percpu_up_read(&c->mark_lock);
423 ret = bch2_mark_replicas(c, r);
427 percpu_down_read(&c->mark_lock);
428 idx = bch2_replicas_entry_idx(c, r);
436 fs_usage = fs_usage_ptr(c, journal_seq, gc);
437 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
438 fs_usage->replicas[idx] += sectors;
442 percpu_up_read(&c->mark_lock);
446 static inline int update_cached_sectors(struct bch_fs *c,
448 unsigned dev, s64 sectors,
449 unsigned journal_seq, bool gc)
451 struct bch_replicas_padded r;
453 bch2_replicas_entry_cached(&r.e, dev);
455 return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
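/*
 * Make sure the transaction's replicas delta list has room for @more bytes,
 * growing it up to REPLICAS_DELTA_LIST_MAX (then falling back to the mempool):
 */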
458 static struct replicas_delta_list *
459 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
461 struct replicas_delta_list *d = trans->fs_usage_deltas;
462 unsigned new_size = d ? (d->size + more) * 2 : 128;
463 unsigned alloc_size = sizeof(*d) + new_size;
465 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
467 if (!d || d->used + more > d->size) {
468 d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
470 BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
473 d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
474 memset(d, 0, REPLICAS_DELTA_LIST_MAX);
476 if (trans->fs_usage_deltas)
477 memcpy(d, trans->fs_usage_deltas,
478 trans->fs_usage_deltas->size + sizeof(*d));
480 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
481 kfree(trans->fs_usage_deltas);
485 trans->fs_usage_deltas = d;
490 static inline void update_replicas_list(struct btree_trans *trans,
491 struct bch_replicas_entry *r,
494 struct replicas_delta_list *d;
495 struct replicas_delta *n;
501 b = replicas_entry_bytes(r) + 8;
502 d = replicas_deltas_realloc(trans, b);
504 n = (void *) d->d + d->used;
506 memcpy(&n->r, r, replicas_entry_bytes(r));
507 bch2_replicas_entry_sort(&n->r);
511 static inline void update_cached_sectors_list(struct btree_trans *trans,
512 unsigned dev, s64 sectors)
514 struct bch_replicas_padded r;
516 bch2_replicas_entry_cached(&r.e, dev);
518 update_replicas_list(trans, &r.e, sectors);
521 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
522 size_t b, bool owned_by_allocator)
524 struct bucket *g = bucket(ca, b);
525 struct bucket_mark old, new;
527 old = bucket_cmpxchg(g, new, ({
528 new.owned_by_allocator = owned_by_allocator;
531 BUG_ON(owned_by_allocator == old.owned_by_allocator);
534 static inline u8 bkey_alloc_gen(struct bkey_s_c k)
538 return bkey_s_c_to_alloc(k).v->gen;
539 case KEY_TYPE_alloc_v2:
540 return bkey_s_c_to_alloc_v2(k).v->gen;
541 case KEY_TYPE_alloc_v3:
542 return bkey_s_c_to_alloc_v3(k).v->gen;
548 static int bch2_mark_alloc(struct btree_trans *trans,
549 struct bkey_s_c old, struct bkey_s_c new,
552 bool gc = flags & BTREE_TRIGGER_GC;
553 u64 journal_seq = trans->journal_res.seq;
554 struct bch_fs *c = trans->c;
555 struct bkey_alloc_unpacked old_u = bch2_alloc_unpack(old);
556 struct bkey_alloc_unpacked new_u = bch2_alloc_unpack(new);
559 struct bucket_mark old_m, m;
563 * alloc btree is read in by bch2_alloc_read, not gc:
565 if ((flags & BTREE_TRIGGER_GC) &&
566 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
569 if ((flags & BTREE_TRIGGER_INSERT) &&
570 !old_u.data_type != !new_u.data_type &&
571 new.k->type == KEY_TYPE_alloc_v3) {
572 struct bch_alloc_v3 *v = (struct bch_alloc_v3 *) new.v;
574 BUG_ON(!journal_seq);
577 * If the btree updates referring to a bucket weren't flushed
578 * before the bucket became empty again, then we don't have
579 * to wait on a journal flush before we can reuse the bucket:
581 v->journal_seq = !new_u.data_type &&
582 bch2_journal_noflush_seq(&c->journal, journal_seq)
583 ? 0 : cpu_to_le64(journal_seq);
586 ca = bch_dev_bkey_exists(c, new.k->p.inode);
588 if (new.k->p.offset >= ca->mi.nbuckets)
591 percpu_down_read(&c->mark_lock);
592 if (!gc && new_u.gen != bkey_alloc_gen(old))
593 *bucket_gen(ca, new.k->p.offset) = new_u.gen;
595 g = __bucket(ca, new.k->p.offset, gc);
597 old_m = bucket_cmpxchg(g, m, ({
599 m.data_type = new_u.data_type;
600 m.dirty_sectors = new_u.dirty_sectors;
601 m.cached_sectors = new_u.cached_sectors;
602 m.stripe = new_u.stripe != 0;
605 m.journal_seq_valid = 1;
606 m.journal_seq = journal_seq;
610 bch2_dev_usage_update(c, ca, old_m, m, journal_seq, gc);
612 g->io_time[READ] = new_u.read_time;
613 g->io_time[WRITE] = new_u.write_time;
614 g->oldest_gen = new_u.oldest_gen;
616 g->stripe = new_u.stripe;
617 g->stripe_redundancy = new_u.stripe_redundancy;
618 percpu_up_read(&c->mark_lock);
621 * need to know if we're getting called from the invalidate path or
625 if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
626 old_m.cached_sectors) {
627 ret = update_cached_sectors(c, new, ca->dev_idx,
628 -old_m.cached_sectors,
631 bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
635 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
636 old_m.cached_sectors);
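/* Saturating add into a u16 counter; evaluates to true if it would overflow: */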
642 #define checked_add(a, b) \
644 unsigned _res = (unsigned) (a) + (b); \
645 bool overflow = _res > U16_MAX; \
652 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
653 size_t b, enum bch_data_type data_type,
654 unsigned sectors, struct gc_pos pos,
658 struct bucket_mark old, new;
661 BUG_ON(!(flags & BTREE_TRIGGER_GC));
662 BUG_ON(data_type != BCH_DATA_sb &&
663 data_type != BCH_DATA_journal);
666 * Backup superblock might be past the end of our normal usable space:
668 if (b >= ca->mi.nbuckets)
671 percpu_down_read(&c->mark_lock);
672 g = gc_bucket(ca, b);
673 old = bucket_cmpxchg(g, new, ({
674 new.data_type = data_type;
675 overflow = checked_add(new.dirty_sectors, sectors);
678 bch2_fs_inconsistent_on(old.data_type &&
679 old.data_type != data_type, c,
680 "different types of data in same bucket: %s, %s",
681 bch2_data_types[old.data_type],
682 bch2_data_types[data_type]);
684 bch2_fs_inconsistent_on(overflow, c,
685 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
686 ca->dev_idx, b, new.gen,
687 bch2_data_types[old.data_type ?: data_type],
688 old.dirty_sectors, sectors);
690 bch2_dev_usage_update(c, ca, old, new, 0, true);
691 percpu_up_read(&c->mark_lock);
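/*
 * Sectors a pointer takes up on disk: scaled down by the compression ratio
 * for compressed (non-incompressible) extents:
 */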
694 static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
696 EBUG_ON(sectors < 0);
698 return p.crc.compression_type &&
699 p.crc.compression_type != BCH_COMPRESSION_TYPE_incompressible
700 ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
701 p.crc.uncompressed_size)
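/*
 * Sanity check a pointer against the bucket it points into: pointer gens newer
 * than the bucket gen, too-stale gens, stale dirty pointers, conflicting data
 * types and bucket sector count overflow are all reported as fsck errors:
 */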
705 static int check_bucket_ref(struct bch_fs *c,
707 const struct bch_extent_ptr *ptr,
708 s64 sectors, enum bch_data_type ptr_data_type,
709 u8 bucket_gen, u8 bucket_data_type,
710 u16 dirty_sectors, u16 cached_sectors)
712 size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
713 u16 bucket_sectors = !ptr->cached
718 if (gen_after(ptr->gen, bucket_gen)) {
719 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
720 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
722 ptr->dev, bucket_nr, bucket_gen,
723 bch2_data_types[bucket_data_type ?: ptr_data_type],
725 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
729 if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
730 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
731 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
733 ptr->dev, bucket_nr, bucket_gen,
734 bch2_data_types[bucket_data_type ?: ptr_data_type],
736 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
740 if (bucket_gen != ptr->gen && !ptr->cached) {
741 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
742 "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
744 ptr->dev, bucket_nr, bucket_gen,
745 bch2_data_types[bucket_data_type ?: ptr_data_type],
747 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
751 if (bucket_gen != ptr->gen)
754 if (bucket_data_type && ptr_data_type &&
755 bucket_data_type != ptr_data_type) {
756 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
757 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
759 ptr->dev, bucket_nr, bucket_gen,
760 bch2_data_types[bucket_data_type],
761 bch2_data_types[ptr_data_type],
762 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
766 if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
767 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
768 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
770 ptr->dev, bucket_nr, bucket_gen,
771 bch2_data_types[bucket_data_type ?: ptr_data_type],
772 bucket_sectors, sectors,
773 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
780 static int mark_stripe_bucket(struct btree_trans *trans,
783 u64 journal_seq, unsigned flags)
785 struct bch_fs *c = trans->c;
786 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
787 unsigned nr_data = s->nr_blocks - s->nr_redundant;
788 bool parity = ptr_idx >= nr_data;
789 enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
790 s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
791 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
792 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
794 struct bucket_mark new, old;
798 BUG_ON(!(flags & BTREE_TRIGGER_GC));
800 /* XXX doesn't handle deletion */
802 percpu_down_read(&c->mark_lock);
803 g = PTR_GC_BUCKET(ca, ptr);
805 if (g->mark.dirty_sectors ||
806 (g->stripe && g->stripe != k.k->p.offset)) {
807 bch2_fs_inconsistent(c,
808 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
809 ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
810 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
815 old = bucket_cmpxchg(g, new, ({
816 ret = check_bucket_ref(c, k, ptr, sectors, data_type,
817 new.gen, new.data_type,
818 new.dirty_sectors, new.cached_sectors);
822 new.dirty_sectors += sectors;
824 new.data_type = data_type;
827 new.journal_seq_valid = 1;
828 new.journal_seq = journal_seq;
834 g->stripe = k.k->p.offset;
835 g->stripe_redundancy = s->nr_redundant;
837 bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
839 percpu_up_read(&c->mark_lock);
844 static int __mark_pointer(struct btree_trans *trans,
846 const struct bch_extent_ptr *ptr,
847 s64 sectors, enum bch_data_type ptr_data_type,
848 u8 bucket_gen, u8 *bucket_data_type,
849 u16 *dirty_sectors, u16 *cached_sectors)
851 u16 *dst_sectors = !ptr->cached
854 int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
855 bucket_gen, *bucket_data_type,
856 *dirty_sectors, *cached_sectors);
861 *dst_sectors += sectors;
862 *bucket_data_type = *dirty_sectors || *cached_sectors
867 static int bch2_mark_pointer(struct btree_trans *trans,
869 struct extent_ptr_decoded p,
870 s64 sectors, enum bch_data_type data_type,
873 u64 journal_seq = trans->journal_res.seq;
874 struct bch_fs *c = trans->c;
875 struct bucket_mark old, new;
876 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
882 BUG_ON(!(flags & BTREE_TRIGGER_GC));
884 percpu_down_read(&c->mark_lock);
885 g = PTR_GC_BUCKET(ca, &p.ptr);
887 v = atomic64_read(&g->_mark.v);
889 new.v.counter = old.v.counter = v;
890 bucket_data_type = new.data_type;
892 ret = __mark_pointer(trans, k, &p.ptr, sectors,
896 &new.cached_sectors);
900 new.data_type = bucket_data_type;
903 new.journal_seq_valid = 1;
904 new.journal_seq = journal_seq;
907 if (flags & BTREE_TRIGGER_NOATOMIC) {
911 } while ((v = atomic64_cmpxchg(&g->_mark.v,
913 new.v.counter)) != old.v.counter);
915 bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
917 percpu_up_read(&c->mark_lock);
922 static int bch2_mark_stripe_ptr(struct btree_trans *trans,
924 struct bch_extent_stripe_ptr p,
925 enum bch_data_type data_type,
929 struct bch_fs *c = trans->c;
930 struct bch_replicas_padded r;
933 BUG_ON(!(flags & BTREE_TRIGGER_GC));
935 m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
937 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
942 spin_lock(&c->ec_stripes_heap_lock);
944 if (!m || !m->alive) {
945 spin_unlock(&c->ec_stripes_heap_lock);
946 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
948 bch2_inconsistent_error(c);
952 m->block_sectors[p.block] += sectors;
955 spin_unlock(&c->ec_stripes_heap_lock);
957 r.e.data_type = data_type;
958 update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
963 static int bch2_mark_extent(struct btree_trans *trans,
964 struct bkey_s_c old, struct bkey_s_c new,
967 u64 journal_seq = trans->journal_res.seq;
968 struct bch_fs *c = trans->c;
969 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
970 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
971 const union bch_extent_entry *entry;
972 struct extent_ptr_decoded p;
973 struct bch_replicas_padded r;
974 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
977 s64 sectors = bkey_is_btree_ptr(k.k)
980 s64 dirty_sectors = 0;
984 BUG_ON(!(flags & BTREE_TRIGGER_GC));
986 r.e.data_type = data_type;
990 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
991 s64 disk_sectors = ptr_disk_sectors(sectors, p);
993 if (flags & BTREE_TRIGGER_OVERWRITE)
994 disk_sectors = -disk_sectors;
996 ret = bch2_mark_pointer(trans, k, p, disk_sectors,
1005 ret = update_cached_sectors(c, k, p.ptr.dev,
1006 disk_sectors, journal_seq, true);
1008 bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
1012 } else if (!p.has_ec) {
1013 dirty_sectors += disk_sectors;
1014 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1016 ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
1017 disk_sectors, flags);
1022 * There may be other dirty pointers in this extent, but
1023 * if so they're not required for mounting if we have an
1024 * erasure coded pointer in this extent:
1026 r.e.nr_required = 0;
1031 ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
1035 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1036 bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
1044 static int bch2_mark_stripe(struct btree_trans *trans,
1045 struct bkey_s_c old, struct bkey_s_c new,
1048 bool gc = flags & BTREE_TRIGGER_GC;
1049 u64 journal_seq = trans->journal_res.seq;
1050 struct bch_fs *c = trans->c;
1051 u64 idx = new.k->p.offset;
1052 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1053 ? bkey_s_c_to_stripe(old).v : NULL;
1054 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1055 ? bkey_s_c_to_stripe(new).v : NULL;
1059 BUG_ON(gc && old_s);
1062 struct stripe *m = genradix_ptr(&c->stripes, idx);
1064 if (!m || (old_s && !m->alive)) {
1065 char buf1[200], buf2[200];
1067 bch2_bkey_val_to_text(&PBUF(buf1), c, old);
1068 bch2_bkey_val_to_text(&PBUF(buf2), c, new);
1069 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
1071 "new %s", idx, buf1, buf2);
1072 bch2_inconsistent_error(c);
1077 spin_lock(&c->ec_stripes_heap_lock);
1078 bch2_stripes_heap_del(c, m, idx);
1079 spin_unlock(&c->ec_stripes_heap_lock);
1081 memset(m, 0, sizeof(*m));
1084 m->sectors = le16_to_cpu(new_s->sectors);
1085 m->algorithm = new_s->algorithm;
1086 m->nr_blocks = new_s->nr_blocks;
1087 m->nr_redundant = new_s->nr_redundant;
1088 m->blocks_nonempty = 0;
1090 for (i = 0; i < new_s->nr_blocks; i++)
1091 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
1093 spin_lock(&c->ec_stripes_heap_lock);
1094 bch2_stripes_heap_update(c, m, idx);
1095 spin_unlock(&c->ec_stripes_heap_lock);
1098 struct gc_stripe *m =
1099 genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
1102 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
1107 * This will be wrong when we bring back runtime gc: we should
1108 * be unmarking the old key and then marking the new key
1111 m->sectors = le16_to_cpu(new_s->sectors);
1112 m->nr_blocks = new_s->nr_blocks;
1113 m->nr_redundant = new_s->nr_redundant;
1115 for (i = 0; i < new_s->nr_blocks; i++)
1116 m->ptrs[i] = new_s->ptrs[i];
1118 bch2_bkey_to_replicas(&m->r.e, new);
1121 * gc recalculates this field from stripe ptr
1124 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1126 for (i = 0; i < new_s->nr_blocks; i++) {
1127 ret = mark_stripe_bucket(trans, new, i, journal_seq, flags);
1132 ret = update_replicas(c, new, &m->r.e,
1133 ((s64) m->sectors * m->nr_redundant),
1138 bch2_bkey_val_to_text(&PBUF(buf), c, new);
1139 bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
1147 static int bch2_mark_inode(struct btree_trans *trans,
1148 struct bkey_s_c old, struct bkey_s_c new,
1151 struct bch_fs *c = trans->c;
1152 struct bch_fs_usage __percpu *fs_usage;
1153 u64 journal_seq = trans->journal_res.seq;
1155 if (flags & BTREE_TRIGGER_INSERT) {
1156 struct bch_inode_v2 *v = (struct bch_inode_v2 *) new.v;
1158 BUG_ON(!journal_seq);
1159 BUG_ON(new.k->type != KEY_TYPE_inode_v2);
1161 v->bi_journal_seq = cpu_to_le64(journal_seq);
1164 if (flags & BTREE_TRIGGER_GC) {
1165 percpu_down_read(&c->mark_lock);
1168 fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
1169 fs_usage->nr_inodes += bkey_is_inode(new.k);
1170 fs_usage->nr_inodes -= bkey_is_inode(old.k);
1173 percpu_up_read(&c->mark_lock);
1178 static int bch2_mark_reservation(struct btree_trans *trans,
1179 struct bkey_s_c old, struct bkey_s_c new,
1182 struct bch_fs *c = trans->c;
1183 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
1184 struct bch_fs_usage __percpu *fs_usage;
1185 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1186 s64 sectors = (s64) k.k->size;
1188 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1190 if (flags & BTREE_TRIGGER_OVERWRITE)
1192 sectors *= replicas;
1194 percpu_down_read(&c->mark_lock);
1197 fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
1198 replicas = clamp_t(unsigned, replicas, 1,
1199 ARRAY_SIZE(fs_usage->persistent_reserved));
1201 fs_usage->reserved += sectors;
1202 fs_usage->persistent_reserved[replicas - 1] += sectors;
1205 percpu_up_read(&c->mark_lock);
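/*
 * Adjust the refcount of the reflink_gc entry covering *idx; if the indirect
 * extent doesn't exist, the reflink pointer is replaced with an error key:
 */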
1210 static s64 __bch2_mark_reflink_p(struct bch_fs *c, struct bkey_s_c_reflink_p p,
1211 u64 *idx, unsigned flags, size_t r_idx)
1213 struct reflink_gc *r;
1214 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1217 if (r_idx >= c->reflink_gc_nr)
1220 r = genradix_ptr(&c->reflink_gc_table, r_idx);
1221 if (*idx < r->offset - r->size)
1224 BUG_ON((s64) r->refcount + add < 0);
1234 * XXX: we're replacing the entire reflink pointer with an error
1235 * key, we should just be replacing the part that was missing:
1237 if (fsck_err(c, "%llu:%llu len %u points to nonexistent indirect extent %llu",
1238 p.k->p.inode, p.k->p.offset, p.k->size, *idx)) {
1239 struct bkey_i_error new;
1242 new.k.type = KEY_TYPE_error;
1244 new.k.size = p.k->size;
1245 ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new.k_i);
1251 static int bch2_mark_reflink_p(struct btree_trans *trans,
1252 struct bkey_s_c old, struct bkey_s_c new,
1255 struct bch_fs *c = trans->c;
1256 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
1257 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1258 struct reflink_gc *ref;
1260 u64 idx = le64_to_cpu(p.v->idx);
1261 u64 end = le64_to_cpu(p.v->idx) + p.k->size;
1264 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1266 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
1267 idx -= le32_to_cpu(p.v->front_pad);
1268 end += le32_to_cpu(p.v->back_pad);
1272 r = c->reflink_gc_nr;
1274 m = l + (r - l) / 2;
1276 ref = genradix_ptr(&c->reflink_gc_table, m);
1277 if (ref->offset <= idx)
1283 while (idx < end && !ret)
1284 ret = __bch2_mark_reflink_p(c, p, &idx, flags, l++);
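/* Dispatch to the (non-transactional) mark trigger for this key type: */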
1289 int bch2_mark_key(struct btree_trans *trans,
1290 struct bkey_s_c old,
1291 struct bkey_s_c new,
1294 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
1296 switch (k.k->type) {
1297 case KEY_TYPE_alloc:
1298 case KEY_TYPE_alloc_v2:
1299 case KEY_TYPE_alloc_v3:
1300 return bch2_mark_alloc(trans, old, new, flags);
1301 case KEY_TYPE_btree_ptr:
1302 case KEY_TYPE_btree_ptr_v2:
1303 case KEY_TYPE_extent:
1304 case KEY_TYPE_reflink_v:
1305 return bch2_mark_extent(trans, old, new, flags);
1306 case KEY_TYPE_stripe:
1307 return bch2_mark_stripe(trans, old, new, flags);
1308 case KEY_TYPE_inode:
1309 case KEY_TYPE_inode_v2:
1310 return bch2_mark_inode(trans, old, new, flags);
1311 case KEY_TYPE_reservation:
1312 return bch2_mark_reservation(trans, old, new, flags);
1313 case KEY_TYPE_reflink_p:
1314 return bch2_mark_reflink_p(trans, old, new, flags);
1315 case KEY_TYPE_snapshot:
1316 return bch2_mark_snapshot(trans, old, new, flags);
1322 int bch2_mark_update(struct btree_trans *trans, struct btree_path *path,
1323 struct bkey_i *new, unsigned flags)
1325 struct bkey _deleted = KEY(0, 0, 0);
1326 struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };
1327 struct bkey_s_c old;
1328 struct bkey unpacked;
1331 _deleted.p = path->pos;
1333 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1336 if (!btree_node_type_needs_gc(path->btree_id))
1339 old = bch2_btree_path_peek_slot(path, &unpacked);
1341 if (old.k->type == new->k.type &&
1342 ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
1343 ret = bch2_mark_key(trans, old, bkey_i_to_s_c(new),
1344 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1346 ret = bch2_mark_key(trans, deleted, bkey_i_to_s_c(new),
1347 BTREE_TRIGGER_INSERT|flags) ?:
1348 bch2_mark_key(trans, old, deleted,
1349 BTREE_TRIGGER_OVERWRITE|flags);
1355 static noinline __cold
1356 void fs_usage_apply_warn(struct btree_trans *trans,
1357 unsigned disk_res_sectors,
1358 s64 should_not_have_added)
1360 struct bch_fs *c = trans->c;
1361 struct btree_insert_entry *i;
1364 bch_err(c, "disk usage increased %lli more than %u sectors reserved",
1365 should_not_have_added, disk_res_sectors);
1367 trans_for_each_update(trans, i) {
1368 pr_err("while inserting");
1369 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1371 pr_err("overlapping with");
1375 struct bkey_s_c k = bch2_btree_path_peek_slot(i->path, &u);
1377 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1380 struct bkey_cached *ck = (void *) i->path->l[0].b;
1383 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
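/*
 * At transaction commit, apply the accumulated replicas deltas to the
 * filesystem usage counters and check that any increase in usage was covered
 * by the transaction's disk reservation:
 */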
1391 int bch2_trans_fs_usage_apply(struct btree_trans *trans,
1392 struct replicas_delta_list *deltas)
1394 struct bch_fs *c = trans->c;
1395 static int warned_disk_usage = 0;
1397 unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1398 struct replicas_delta *d = deltas->d, *d2;
1399 struct replicas_delta *top = (void *) deltas->d + deltas->used;
1400 struct bch_fs_usage *dst;
1401 s64 added = 0, should_not_have_added;
1404 percpu_down_read(&c->mark_lock);
1406 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1408 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1409 switch (d->r.data_type) {
1410 case BCH_DATA_btree:
1412 case BCH_DATA_parity:
1416 if (__update_replicas(c, dst, &d->r, d->delta))
1420 dst->nr_inodes += deltas->nr_inodes;
1422 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1423 added += deltas->persistent_reserved[i];
1424 dst->reserved += deltas->persistent_reserved[i];
1425 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
1429 * Not allowed to reduce sectors_available except by getting a
1432 should_not_have_added = added - (s64) disk_res_sectors;
1433 if (unlikely(should_not_have_added > 0)) {
1434 u64 old, new, v = atomic64_read(&c->sectors_available);
1438 new = max_t(s64, 0, old - should_not_have_added);
1439 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1442 added -= should_not_have_added;
1447 trans->disk_res->sectors -= added;
1448 this_cpu_sub(*c->online_reserved, added);
1452 percpu_up_read(&c->mark_lock);
1454 if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1455 fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
1458 /* revert changes: */
1459 for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
1460 BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
1463 percpu_up_read(&c->mark_lock);
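/*
 * Get an iterator to the alloc key for @ptr's bucket and unpack its current
 * state, preferring an update already queued up in this transaction:
 */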
1469 static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
1470 const struct bch_extent_ptr *ptr,
1471 struct bkey_alloc_unpacked *u)
1473 struct bch_fs *c = trans->c;
1474 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1475 struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1476 struct bkey_i *update = btree_trans_peek_updates(trans, BTREE_ID_alloc, pos);
1479 bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
1481 BTREE_ITER_CACHED_NOFILL|
1483 ret = bch2_btree_iter_traverse(iter);
1485 bch2_trans_iter_exit(trans, iter);
1489 *u = update && !bpos_cmp(update->k.p, pos)
1490 ? bch2_alloc_unpack(bkey_i_to_s_c(update))
1491 : alloc_mem_to_key(c, iter);
1496 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1497 struct bkey_s_c k, struct extent_ptr_decoded p,
1498 s64 sectors, enum bch_data_type data_type)
1500 struct btree_iter iter;
1501 struct bkey_alloc_unpacked u;
1504 ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1508 ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
1509 u.gen, &u.data_type,
1510 &u.dirty_sectors, &u.cached_sectors);
1514 ret = bch2_alloc_write(trans, &iter, &u, 0);
1518 bch2_trans_iter_exit(trans, &iter);
1522 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1523 struct extent_ptr_decoded p,
1524 s64 sectors, enum bch_data_type data_type)
1526 struct bch_fs *c = trans->c;
1527 struct btree_iter iter;
1529 struct bkey_i_stripe *s;
1530 struct bch_replicas_padded r;
1533 bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
1535 BTREE_ITER_WITH_UPDATES);
1536 k = bch2_btree_iter_peek_slot(&iter);
1541 if (k.k->type != KEY_TYPE_stripe) {
1542 bch2_fs_inconsistent(c,
1543 "pointer to nonexistent stripe %llu",
1545 bch2_inconsistent_error(c);
1550 if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1551 bch2_fs_inconsistent(c,
1552 "stripe pointer doesn't match stripe %llu",
1558 s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1559 ret = PTR_ERR_OR_ZERO(s);
1563 bkey_reassemble(&s->k_i, k);
1564 stripe_blockcount_set(&s->v, p.ec.block,
1565 stripe_blockcount_get(&s->v, p.ec.block) +
1568 ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
1572 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1573 r.e.data_type = data_type;
1574 update_replicas_list(trans, &r.e, sectors);
1576 bch2_trans_iter_exit(trans, &iter);
1580 static int bch2_trans_mark_extent(struct btree_trans *trans,
1581 struct bkey_s_c k, unsigned flags)
1583 struct bch_fs *c = trans->c;
1584 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1585 const union bch_extent_entry *entry;
1586 struct extent_ptr_decoded p;
1587 struct bch_replicas_padded r;
1588 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
1591 s64 sectors = bkey_is_btree_ptr(k.k)
1594 s64 dirty_sectors = 0;
1598 r.e.data_type = data_type;
1600 r.e.nr_required = 1;
1602 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1603 s64 disk_sectors = ptr_disk_sectors(sectors, p);
1605 if (flags & BTREE_TRIGGER_OVERWRITE)
1606 disk_sectors = -disk_sectors;
1608 ret = bch2_trans_mark_pointer(trans, k, p,
1609 disk_sectors, data_type);
1617 update_cached_sectors_list(trans, p.ptr.dev,
1619 } else if (!p.has_ec) {
1620 dirty_sectors += disk_sectors;
1621 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1623 ret = bch2_trans_mark_stripe_ptr(trans, p,
1624 disk_sectors, data_type);
1628 r.e.nr_required = 0;
1633 update_replicas_list(trans, &r.e, dirty_sectors);
1638 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
1639 struct bkey_s_c_stripe s,
1640 unsigned idx, bool deleting)
1642 struct bch_fs *c = trans->c;
1643 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1644 struct btree_iter iter;
1645 struct bkey_alloc_unpacked u;
1646 enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
1647 ? BCH_DATA_parity : 0;
1648 s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
1654 ret = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1658 ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
1660 u.dirty_sectors, u.cached_sectors);
1665 if (bch2_fs_inconsistent_on(u.stripe ||
1666 u.stripe_redundancy, c,
1667 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
1668 iter.pos.inode, iter.pos.offset, u.gen,
1669 bch2_data_types[u.data_type],
1671 u.stripe, s.k->p.offset)) {
1676 if (bch2_fs_inconsistent_on(data_type && u.dirty_sectors, c,
1677 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
1678 iter.pos.inode, iter.pos.offset, u.gen,
1679 bch2_data_types[u.data_type],
1686 u.stripe = s.k->p.offset;
1687 u.stripe_redundancy = s.v->nr_redundant;
1689 if (bch2_fs_inconsistent_on(u.stripe != s.k->p.offset ||
1690 u.stripe_redundancy != s.v->nr_redundant, c,
1691 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
1692 iter.pos.inode, iter.pos.offset, u.gen,
1693 s.k->p.offset, u.stripe)) {
1699 u.stripe_redundancy = 0;
1702 u.dirty_sectors += sectors;
1704 u.data_type = !deleting ? data_type : 0;
1706 ret = bch2_alloc_write(trans, &iter, &u, 0);
1710 bch2_trans_iter_exit(trans, &iter);
1714 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1715 struct bkey_s_c old, struct bkey_s_c new,
1718 struct bkey_s_c_stripe old_s = { .k = NULL };
1719 struct bkey_s_c_stripe new_s = { .k = NULL };
1720 struct bch_replicas_padded r;
1721 unsigned i, nr_blocks;
1724 if (old.k->type == KEY_TYPE_stripe)
1725 old_s = bkey_s_c_to_stripe(old);
1726 if (new.k->type == KEY_TYPE_stripe)
1727 new_s = bkey_s_c_to_stripe(new);
1730 * If the pointers aren't changing, we don't need to do anything:
1732 if (new_s.k && old_s.k &&
1733 new_s.v->nr_blocks == old_s.v->nr_blocks &&
1734 new_s.v->nr_redundant == old_s.v->nr_redundant &&
1735 !memcmp(old_s.v->ptrs, new_s.v->ptrs,
1736 new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
1739 BUG_ON(new_s.k && old_s.k &&
1740 (new_s.v->nr_blocks != old_s.v->nr_blocks ||
1741 new_s.v->nr_redundant != old_s.v->nr_redundant));
1743 nr_blocks = new_s.k ? new_s.v->nr_blocks : old_s.v->nr_blocks;
1746 s64 sectors = le16_to_cpu(new_s.v->sectors);
1748 bch2_bkey_to_replicas(&r.e, new);
1749 update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
1753 s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
1755 bch2_bkey_to_replicas(&r.e, old);
1756 update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
1759 for (i = 0; i < nr_blocks; i++) {
1760 if (new_s.k && old_s.k &&
1761 !memcmp(&new_s.v->ptrs[i],
1763 sizeof(new_s.v->ptrs[i])))
1767 ret = bch2_trans_mark_stripe_bucket(trans, new_s, i, false);
1773 ret = bch2_trans_mark_stripe_bucket(trans, old_s, i, true);
1782 static int bch2_trans_mark_inode(struct btree_trans *trans,
1783 struct bkey_s_c old,
1784 struct bkey_s_c new,
1787 int nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
1790 struct replicas_delta_list *d =
1791 replicas_deltas_realloc(trans, 0);
1798 static int bch2_trans_mark_reservation(struct btree_trans *trans,
1799 struct bkey_s_c k, unsigned flags)
1801 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1802 s64 sectors = (s64) k.k->size;
1803 struct replicas_delta_list *d;
1805 if (flags & BTREE_TRIGGER_OVERWRITE)
1807 sectors *= replicas;
1809 d = replicas_deltas_realloc(trans, 0);
1811 replicas = clamp_t(unsigned, replicas, 1,
1812 ARRAY_SIZE(d->persistent_reserved));
1814 d->persistent_reserved[replicas - 1] += sectors;
1818 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1819 struct bkey_s_c_reflink_p p,
1820 u64 *idx, unsigned flags)
1822 struct bch_fs *c = trans->c;
1823 struct btree_iter iter;
1827 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1831 bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
1833 BTREE_ITER_WITH_UPDATES);
1834 k = bch2_btree_iter_peek_slot(&iter);
1839 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1840 ret = PTR_ERR_OR_ZERO(n);
1844 bkey_reassemble(n, k);
1846 refcount = bkey_refcount(n);
1848 bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
1849 bch2_fs_inconsistent(c,
1850 "nonexistent indirect extent at %llu while marking\n %s",
1856 if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
1857 bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
1858 bch2_fs_inconsistent(c,
1859 "indirect extent refcount underflow at %llu while marking\n %s",
1865 if (flags & BTREE_TRIGGER_INSERT) {
1866 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1869 pad = max_t(s64, le32_to_cpu(v->front_pad),
1870 le64_to_cpu(v->idx) - bkey_start_offset(k.k));
1871 BUG_ON(pad > U32_MAX);
1872 v->front_pad = cpu_to_le32(pad);
1874 pad = max_t(s64, le32_to_cpu(v->back_pad),
1875 k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
1876 BUG_ON(pad > U32_MAX);
1877 v->back_pad = cpu_to_le32(pad);
1880 le64_add_cpu(refcount, add);
1883 n->k.type = KEY_TYPE_deleted;
1884 set_bkey_val_u64s(&n->k, 0);
1887 bch2_btree_iter_set_pos_to_extent_start(&iter);
1888 ret = bch2_trans_update(trans, &iter, n, 0);
1892 *idx = k.k->p.offset;
1894 bch2_trans_iter_exit(trans, &iter);
1898 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1899 struct bkey_s_c k, unsigned flags)
1901 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1905 if (flags & BTREE_TRIGGER_INSERT) {
1906 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1908 v->front_pad = v->back_pad = 0;
1911 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
1912 end_idx = le64_to_cpu(p.v->idx) + p.k->size +
1913 le32_to_cpu(p.v->back_pad);
1915 while (idx < end_idx && !ret)
1916 ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
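/* Dispatch to the transactional mark trigger for this key type: */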
1921 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
1922 struct bkey_s_c new, unsigned flags)
1924 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
1926 switch (k.k->type) {
1927 case KEY_TYPE_btree_ptr:
1928 case KEY_TYPE_btree_ptr_v2:
1929 case KEY_TYPE_extent:
1930 case KEY_TYPE_reflink_v:
1931 return bch2_trans_mark_extent(trans, k, flags);
1932 case KEY_TYPE_stripe:
1933 return bch2_trans_mark_stripe(trans, old, new, flags);
1934 case KEY_TYPE_inode:
1935 case KEY_TYPE_inode_v2:
1936 return bch2_trans_mark_inode(trans, old, new, flags);
1937 case KEY_TYPE_reservation:
1938 return bch2_trans_mark_reservation(trans, k, flags);
1939 case KEY_TYPE_reflink_p:
1940 return bch2_trans_mark_reflink_p(trans, k, flags);
1946 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1947 struct bch_dev *ca, size_t b,
1948 enum bch_data_type type,
1951 struct bch_fs *c = trans->c;
1952 struct btree_iter iter;
1953 struct bkey_alloc_unpacked u;
1954 struct bch_extent_ptr ptr = {
1956 .offset = bucket_to_sector(ca, b),
1961 * Backup superblock might be past the end of our normal usable space:
1963 if (b >= ca->mi.nbuckets)
1966 ret = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
1970 if (u.data_type && u.data_type != type) {
1971 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1972 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
1974 iter.pos.inode, iter.pos.offset, u.gen,
1975 bch2_data_types[u.data_type],
1976 bch2_data_types[type],
1977 bch2_data_types[type]);
1983 u.dirty_sectors = sectors;
1985 ret = bch2_alloc_write(trans, &iter, &u, 0);
1989 bch2_trans_iter_exit(trans, &iter);
1993 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1994 struct bch_dev *ca, size_t b,
1995 enum bch_data_type type,
1998 return __bch2_trans_do(trans, NULL, NULL, 0,
1999 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
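/*
 * Mark a range of metadata sectors bucket by bucket, flushing the accumulated
 * count whenever we cross into the next bucket:
 */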
2002 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
2005 enum bch_data_type type,
2006 u64 *bucket, unsigned *bucket_sectors)
2009 u64 b = sector_to_bucket(ca, start);
2011 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
2013 if (b != *bucket && *bucket_sectors) {
2014 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
2015 type, *bucket_sectors);
2019 *bucket_sectors = 0;
2023 *bucket_sectors += sectors;
2025 } while (start < end);
2030 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
2033 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
2035 unsigned i, bucket_sectors = 0;
2038 for (i = 0; i < layout->nr_superblocks; i++) {
2039 u64 offset = le64_to_cpu(layout->sb_offset[i]);
2041 if (offset == BCH_SB_SECTOR) {
2042 ret = bch2_trans_mark_metadata_sectors(trans, ca,
2044 BCH_DATA_sb, &bucket, &bucket_sectors);
2049 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
2050 offset + (1 << layout->sb_max_size_bits),
2051 BCH_DATA_sb, &bucket, &bucket_sectors);
2056 if (bucket_sectors) {
2057 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2058 bucket, BCH_DATA_sb, bucket_sectors);
2063 for (i = 0; i < ca->journal.nr; i++) {
2064 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2065 ca->journal.buckets[i],
2066 BCH_DATA_journal, ca->mi.bucket_size);
2074 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
2076 return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
2077 __bch2_trans_mark_dev_sb(&trans, ca));
2080 /* Disk reservations: */
2082 #define SECTORS_CACHE 1024
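/*
 * Reserve @sectors from the filesystem-wide pool: the fast path takes from a
 * per-cpu cache refilled in batches of SECTORS_CACHE from c->sectors_available;
 * the slow path recalculates what's available under sectors_available_lock:
 */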
2084 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2085 u64 sectors, int flags)
2087 struct bch_fs_pcpu *pcpu;
2089 s64 sectors_available;
2092 percpu_down_read(&c->mark_lock);
2094 pcpu = this_cpu_ptr(c->pcpu);
2096 if (sectors <= pcpu->sectors_available)
2099 v = atomic64_read(&c->sectors_available);
2102 get = min((u64) sectors + SECTORS_CACHE, old);
2104 if (get < sectors) {
2108 } while ((v = atomic64_cmpxchg(&c->sectors_available,
2109 old, old - get)) != old);
2111 pcpu->sectors_available += get;
2114 pcpu->sectors_available -= sectors;
2115 this_cpu_add(*c->online_reserved, sectors);
2116 res->sectors += sectors;
2119 percpu_up_read(&c->mark_lock);
2123 mutex_lock(&c->sectors_available_lock);
2125 percpu_u64_set(&c->pcpu->sectors_available, 0);
2126 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2128 if (sectors <= sectors_available ||
2129 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2130 atomic64_set(&c->sectors_available,
2131 max_t(s64, 0, sectors_available - sectors));
2132 this_cpu_add(*c->online_reserved, sectors);
2133 res->sectors += sectors;
2136 atomic64_set(&c->sectors_available, sectors_available);
2140 mutex_unlock(&c->sectors_available_lock);
2141 percpu_up_read(&c->mark_lock);
2146 /* Startup/shutdown: */
2148 static void buckets_free_rcu(struct rcu_head *rcu)
2150 struct bucket_array *buckets =
2151 container_of(rcu, struct bucket_array, rcu);
2154 sizeof(struct bucket_array) +
2155 buckets->nbuckets * sizeof(struct bucket));
2158 static void bucket_gens_free_rcu(struct rcu_head *rcu)
2160 struct bucket_gens *buckets =
2161 container_of(rcu, struct bucket_gens, rcu);
2163 kvpfree(buckets, sizeof(struct bucket_gens) + buckets->nbuckets);
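/*
 * Allocate or resize a device's bucket array, gen array, allocator fifos and
 * heap for @nbuckets, copying existing state across under the gc, bucket and
 * mark locks when resizing:
 */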
2166 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2168 struct bucket_array *buckets = NULL, *old_buckets = NULL;
2169 struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
2170 unsigned long *buckets_nouse = NULL;
2171 alloc_fifo free[RESERVE_NR];
2172 alloc_fifo free_inc;
2173 alloc_heap alloc_heap;
2175 size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2176 ca->mi.bucket_size / btree_sectors(c));
2177 /* XXX: these should be tunable */
2178 size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
2179 size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 6);
2180 size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
2182 bool resize = ca->buckets[0] != NULL;
2186 memset(&free, 0, sizeof(free));
2187 memset(&free_inc, 0, sizeof(free_inc));
2188 memset(&alloc_heap, 0, sizeof(alloc_heap));
2190 if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
2191 nbuckets * sizeof(struct bucket),
2192 GFP_KERNEL|__GFP_ZERO)) ||
2193 !(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
2194 GFP_KERNEL|__GFP_ZERO)) ||
2195 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2196 sizeof(unsigned long),
2197 GFP_KERNEL|__GFP_ZERO)) ||
2198 !init_fifo(&free[RESERVE_MOVINGGC],
2199 copygc_reserve, GFP_KERNEL) ||
2200 !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2201 !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
2202 !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2205 buckets->first_bucket = ca->mi.first_bucket;
2206 buckets->nbuckets = nbuckets;
2207 bucket_gens->first_bucket = ca->mi.first_bucket;
2208 bucket_gens->nbuckets = nbuckets;
2210 bch2_copygc_stop(c);
2213 down_write(&c->gc_lock);
2214 down_write(&ca->bucket_lock);
2215 percpu_down_write(&c->mark_lock);
2218 old_buckets = bucket_array(ca);
2219 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
2222 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2226 n * sizeof(struct bucket));
2227 memcpy(bucket_gens->b,
2230 memcpy(buckets_nouse,
2232 BITS_TO_LONGS(n) * sizeof(unsigned long));
2235 rcu_assign_pointer(ca->buckets[0], buckets);
2236 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
2237 buckets = old_buckets;
2238 bucket_gens = old_bucket_gens;
2240 swap(ca->buckets_nouse, buckets_nouse);
2243 percpu_up_write(&c->mark_lock);
2244 up_write(&c->gc_lock);
2247 spin_lock(&c->freelist_lock);
2248 for (i = 0; i < RESERVE_NR; i++) {
2249 fifo_move(&free[i], &ca->free[i]);
2250 swap(ca->free[i], free[i]);
2252 fifo_move(&free_inc, &ca->free_inc);
2253 swap(ca->free_inc, free_inc);
2254 spin_unlock(&c->freelist_lock);
2256 /* with gc lock held, alloc_heap can't be in use: */
2257 swap(ca->alloc_heap, alloc_heap);
2259 nbuckets = ca->mi.nbuckets;
2262 up_write(&ca->bucket_lock);
2266 free_heap(&alloc_heap);
2267 free_fifo(&free_inc);
2268 for (i = 0; i < RESERVE_NR; i++)
2269 free_fifo(&free[i]);
2270 kvpfree(buckets_nouse,
2271 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2273 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
2275 call_rcu(&buckets->rcu, buckets_free_rcu);
2280 void bch2_dev_buckets_free(struct bch_dev *ca)
2284 free_heap(&ca->alloc_heap);
2285 free_fifo(&ca->free_inc);
2286 for (i = 0; i < RESERVE_NR; i++)
2287 free_fifo(&ca->free[i]);
2288 kvpfree(ca->buckets_nouse,
2289 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2290 kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2291 sizeof(struct bucket_array) +
2292 ca->mi.nbuckets * sizeof(struct bucket));
2294 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2295 free_percpu(ca->usage[i]);
2296 kfree(ca->usage_base);
2299 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2303 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2304 if (!ca->usage_base)
2307 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2308 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2313 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);