1 // SPDX-License-Identifier: GPL-2.0
3 * Code for manipulating bucket marks for garbage collection.
5 * Copyright 2014 Datera, Inc.
9 #include "alloc_background.h"
12 #include "btree_update.h"
14 #include "buckets_waiting_for_journal.h"
22 #include "subvolume.h"
24 #include <linux/preempt.h>
25 #include <trace/events/bcachefs.h>
27 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
28 enum bch_data_type data_type,
33 fs_usage->btree += sectors;
37 fs_usage->data += sectors;
40 fs_usage->cached += sectors;
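/*
 * Recompute the summary counters in c->usage_base (reserved, hidden,
 * btree/data/cached) from the per-replicas and per-device usage, after
 * folding all percpu accumulators into the base:
 */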
47 void bch2_fs_usage_initialize(struct bch_fs *c)
49 struct bch_fs_usage *usage;
53 percpu_down_write(&c->mark_lock);
54 usage = c->usage_base;
56 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
57 bch2_fs_usage_acc_to_base(c, i);
59 for (i = 0; i < BCH_REPLICAS_MAX; i++)
60 usage->reserved += usage->persistent_reserved[i];
62 for (i = 0; i < c->replicas.nr; i++) {
63 struct bch_replicas_entry *e =
64 cpu_replicas_entry(&c->replicas, i);
66 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
69 for_each_member_device(ca, c, i) {
70 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
72 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
73 dev.d[BCH_DATA_journal].buckets) *
77 percpu_up_write(&c->mark_lock);
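/*
 * Per-device usage is kept in one set of percpu counters per journal
 * buffer, plus a separate set for gc; return the set this update should
 * be accounted to:
 */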
80 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
84 BUG_ON(!gc && !journal_seq);
86 return this_cpu_ptr(gc
88 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
91 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
93 struct bch_fs *c = ca->fs;
94 struct bch_dev_usage ret;
95 unsigned seq, i, u64s = dev_usage_u64s();
98 seq = read_seqcount_begin(&c->usage_lock);
99 memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
100 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
101 acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
102 } while (read_seqcount_retry(&c->usage_lock, seq));
107 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
108 unsigned journal_seq,
111 percpu_rwsem_assert_held(&c->mark_lock);
112 BUG_ON(!gc && !journal_seq);
114 return this_cpu_ptr(gc
116 : c->usage[journal_seq & JOURNAL_BUF_MASK]);
119 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
121 ssize_t offset = v - (u64 *) c->usage_base;
125 BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
126 percpu_rwsem_assert_held(&c->mark_lock);
129 seq = read_seqcount_begin(&c->usage_lock);
132 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
133 ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
134 } while (read_seqcount_retry(&c->usage_lock, seq));
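/*
 * Snapshot all filesystem usage counters (including online_reserved and
 * the per-replicas counts) into a freshly allocated bch_fs_usage_online;
 * returns NULL on allocation failure, otherwise the caller frees it:
 */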
139 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
141 struct bch_fs_usage_online *ret;
142 unsigned seq, i, u64s;
144 percpu_down_read(&c->mark_lock);
146 ret = kmalloc(sizeof(struct bch_fs_usage_online) +
147 sizeof(u64) * c->replicas.nr, GFP_NOFS);
148 if (unlikely(!ret)) {
149 percpu_up_read(&c->mark_lock);
153 ret->online_reserved = percpu_u64_get(c->online_reserved);
155 u64s = fs_usage_u64s(c);
157 seq = read_seqcount_begin(&c->usage_lock);
158 memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
159 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
160 acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
161 } while (read_seqcount_retry(&c->usage_lock, seq));
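/*
 * Fold the percpu accumulators for journal buffer @idx into usage_base
 * (filesystem and per-device) and zero them, under the usage_lock write
 * seqcount:
 */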
166 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
169 unsigned i, u64s = fs_usage_u64s(c);
171 BUG_ON(idx >= ARRAY_SIZE(c->usage));
174 write_seqcount_begin(&c->usage_lock);
176 acc_u64s_percpu((u64 *) c->usage_base,
177 (u64 __percpu *) c->usage[idx], u64s);
178 percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
181 for_each_member_device_rcu(ca, c, i, NULL) {
182 u64s = dev_usage_u64s();
184 acc_u64s_percpu((u64 *) ca->usage_base,
185 (u64 __percpu *) ca->usage[idx], u64s);
186 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
190 write_seqcount_end(&c->usage_lock);
194 void bch2_fs_usage_to_text(struct printbuf *out,
196 struct bch_fs_usage_online *fs_usage)
200 pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
202 pr_buf(out, "hidden:\t\t\t\t%llu\n",
204 pr_buf(out, "data:\t\t\t\t%llu\n",
206 pr_buf(out, "cached:\t\t\t\t%llu\n",
208 pr_buf(out, "reserved:\t\t\t%llu\n",
209 fs_usage->u.reserved);
210 pr_buf(out, "nr_inodes:\t\t\t%llu\n",
211 fs_usage->u.nr_inodes);
212 pr_buf(out, "online reserved:\t\t%llu\n",
213 fs_usage->online_reserved);
216 i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
218 pr_buf(out, "%u replicas:\n", i + 1);
219 pr_buf(out, "\treserved:\t\t%llu\n",
220 fs_usage->u.persistent_reserved[i]);
223 for (i = 0; i < c->replicas.nr; i++) {
224 struct bch_replicas_entry *e =
225 cpu_replicas_entry(&c->replicas, i);
228 bch2_replicas_entry_to_text(out, e);
229 pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
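/* Pad a reservation by roughly r >> RESERVE_FACTOR, rounding up: */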
233 static u64 reserve_factor(u64 r)
235 return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
238 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
240 return min(fs_usage->u.hidden +
243 reserve_factor(fs_usage->u.reserved +
244 fs_usage->online_reserved),
248 static struct bch_fs_usage_short
249 __bch2_fs_usage_read_short(struct bch_fs *c)
251 struct bch_fs_usage_short ret;
254 ret.capacity = c->capacity -
255 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
257 data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
258 bch2_fs_usage_read_one(c, &c->usage_base->btree);
259 reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
260 percpu_u64_get(c->online_reserved);
262 ret.used = min(ret.capacity, data + reserve_factor(reserved));
263 ret.free = ret.capacity - ret.used;
265 ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
270 struct bch_fs_usage_short
271 bch2_fs_usage_read_short(struct bch_fs *c)
273 struct bch_fs_usage_short ret;
275 percpu_down_read(&c->mark_lock);
276 ret = __bch2_fs_usage_read_short(c);
277 percpu_up_read(&c->mark_lock);
282 static inline int is_unavailable_bucket(struct bucket_mark m)
284 return !is_available_bucket(m);
287 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
288 struct bucket_mark m)
290 return m.dirty_sectors
291 ? max(0, (int) ca->mi.bucket_size - (int) m.dirty_sectors)
295 static inline int is_stripe_data_bucket(struct bucket_mark m)
297 return m.stripe && m.data_type != BCH_DATA_parity;
300 static inline enum bch_data_type bucket_type(struct bucket_mark m)
302 return m.cached_sectors && !m.dirty_sectors
307 static inline void account_bucket(struct bch_fs_usage *fs_usage,
308 struct bch_dev_usage *dev_usage,
309 enum bch_data_type type,
312 if (type == BCH_DATA_sb || type == BCH_DATA_journal)
313 fs_usage->hidden += size;
315 dev_usage->d[type].buckets += nr;
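/*
 * Apply the delta between bucket marks @old and @new to the per-device
 * and filesystem usage counters, and wake the allocator if the bucket
 * just became available:
 */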
318 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
319 struct bucket_mark old, struct bucket_mark new,
320 u64 journal_seq, bool gc)
322 struct bch_fs_usage *fs_usage;
323 struct bch_dev_usage *u;
326 fs_usage = fs_usage_ptr(c, journal_seq, gc);
327 u = dev_usage_ptr(ca, journal_seq, gc);
329 if (bucket_type(old))
330 account_bucket(fs_usage, u, bucket_type(old),
331 -1, -ca->mi.bucket_size);
333 if (bucket_type(new))
334 account_bucket(fs_usage, u, bucket_type(new),
335 1, ca->mi.bucket_size);
337 u->buckets_ec += (int) new.stripe - (int) old.stripe;
338 u->buckets_unavailable +=
339 is_unavailable_bucket(new) - is_unavailable_bucket(old);
341 u->d[old.data_type].sectors -= old.dirty_sectors;
342 u->d[new.data_type].sectors += new.dirty_sectors;
343 u->d[BCH_DATA_cached].sectors +=
344 (int) new.cached_sectors - (int) old.cached_sectors;
346 u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
347 u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
351 if (!is_available_bucket(old) && is_available_bucket(new))
352 bch2_wake_allocator(ca);
355 static inline int __update_replicas(struct bch_fs *c,
356 struct bch_fs_usage *fs_usage,
357 struct bch_replicas_entry *r,
360 int idx = bch2_replicas_entry_idx(c, r);
365 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
366 fs_usage->replicas[idx] += sectors;
370 static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
371 struct bch_replicas_entry *r, s64 sectors,
372 unsigned journal_seq, bool gc)
374 struct bch_fs_usage __percpu *fs_usage;
378 percpu_down_read(&c->mark_lock);
380 idx = bch2_replicas_entry_idx(c, r);
382 (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
383 fsck_err(c, "no replicas entry\n"
385 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))) {
386 percpu_up_read(&c->mark_lock);
387 ret = bch2_mark_replicas(c, r);
391 percpu_down_read(&c->mark_lock);
392 idx = bch2_replicas_entry_idx(c, r);
400 fs_usage = fs_usage_ptr(c, journal_seq, gc);
401 fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
402 fs_usage->replicas[idx] += sectors;
406 percpu_up_read(&c->mark_lock);
410 static inline int update_cached_sectors(struct bch_fs *c,
412 unsigned dev, s64 sectors,
413 unsigned journal_seq, bool gc)
415 struct bch_replicas_padded r;
417 bch2_replicas_entry_cached(&r.e, dev);
419 return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
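/*
 * Ensure the transaction's replicas delta list has room for @more bytes,
 * growing it with krealloc() and falling back to a max-size mempool
 * allocation if that fails:
 */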
422 static struct replicas_delta_list *
423 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
425 struct replicas_delta_list *d = trans->fs_usage_deltas;
426 unsigned new_size = d ? (d->size + more) * 2 : 128;
427 unsigned alloc_size = sizeof(*d) + new_size;
429 WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
431 if (!d || d->used + more > d->size) {
432 d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
434 BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
437 d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
438 memset(d, 0, REPLICAS_DELTA_LIST_MAX);
440 if (trans->fs_usage_deltas)
441 memcpy(d, trans->fs_usage_deltas,
442 trans->fs_usage_deltas->size + sizeof(*d));
444 new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
445 kfree(trans->fs_usage_deltas);
449 trans->fs_usage_deltas = d;
454 static inline void update_replicas_list(struct btree_trans *trans,
455 struct bch_replicas_entry *r,
458 struct replicas_delta_list *d;
459 struct replicas_delta *n;
465 b = replicas_entry_bytes(r) + 8;
466 d = replicas_deltas_realloc(trans, b);
468 n = (void *) d->d + d->used;
470 memcpy(&n->r, r, replicas_entry_bytes(r));
471 bch2_replicas_entry_sort(&n->r);
475 static inline void update_cached_sectors_list(struct btree_trans *trans,
476 unsigned dev, s64 sectors)
478 struct bch_replicas_padded r;
480 bch2_replicas_entry_cached(&r.e, dev);
482 update_replicas_list(trans, &r.e, sectors);
485 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
486 size_t b, bool owned_by_allocator)
488 struct bucket *g = bucket(ca, b);
489 struct bucket_mark old, new;
491 old = bucket_cmpxchg(g, new, ({
492 new.owned_by_allocator = owned_by_allocator;
495 BUG_ON(owned_by_allocator == old.owned_by_allocator);
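/*
 * Trigger for alloc keys: sync the in-memory bucket mark and device
 * usage with the new key, and track the journal sequence number that
 * must be flushed before an emptied bucket can be reused:
 */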
498 static int bch2_mark_alloc(struct btree_trans *trans,
499 struct bkey_s_c old, struct bkey_s_c new,
502 bool gc = flags & BTREE_TRIGGER_GC;
503 u64 journal_seq = trans->journal_res.seq;
504 struct bch_fs *c = trans->c;
505 struct bkey_alloc_unpacked old_u = bch2_alloc_unpack(old);
506 struct bkey_alloc_unpacked new_u = bch2_alloc_unpack(new);
509 struct bucket_mark old_m, m;
513 * alloc btree is read in by bch2_alloc_read, not gc:
515 if ((flags & BTREE_TRIGGER_GC) &&
516 !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
519 if ((flags & BTREE_TRIGGER_INSERT) &&
520 !old_u.data_type != !new_u.data_type &&
521 new.k->type == KEY_TYPE_alloc_v3) {
522 struct bch_alloc_v3 *v = (struct bch_alloc_v3 *) new.v;
523 u64 old_journal_seq = le64_to_cpu(v->journal_seq);
525 BUG_ON(!journal_seq);
528 * If the btree updates referring to a bucket weren't flushed
529 * before the bucket became empty again, then we don't have
530 * to wait on a journal flush before we can reuse the bucket:
532 new_u.journal_seq = !new_u.data_type &&
533 (journal_seq == old_journal_seq ||
534 bch2_journal_noflush_seq(&c->journal, old_journal_seq))
536 v->journal_seq = cpu_to_le64(new_u.journal_seq);
539 if (old_u.data_type && !new_u.data_type && new_u.journal_seq) {
540 ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
541 c->journal.flushed_seq_ondisk,
542 new_u.dev, new_u.bucket,
545 bch2_fs_fatal_error(c,
546 "error setting bucket_needs_journal_commit: %i", ret);
551 ca = bch_dev_bkey_exists(c, new_u.dev);
553 if (new_u.bucket >= ca->mi.nbuckets)
556 percpu_down_read(&c->mark_lock);
557 if (!gc && new_u.gen != old_u.gen)
558 *bucket_gen(ca, new_u.bucket) = new_u.gen;
560 g = __bucket(ca, new_u.bucket, gc);
562 old_m = bucket_cmpxchg(g, m, ({
564 m.data_type = new_u.data_type;
565 m.dirty_sectors = new_u.dirty_sectors;
566 m.cached_sectors = new_u.cached_sectors;
567 m.stripe = new_u.stripe != 0;
570 bch2_dev_usage_update(c, ca, old_m, m, journal_seq, gc);
572 g->io_time[READ] = new_u.read_time;
573 g->io_time[WRITE] = new_u.write_time;
574 g->oldest_gen = new_u.oldest_gen;
576 g->stripe = new_u.stripe;
577 g->stripe_redundancy = new_u.stripe_redundancy;
578 percpu_up_read(&c->mark_lock);
581 * need to know if we're getting called from the invalidate path or
585 if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
586 old_m.cached_sectors) {
587 ret = update_cached_sectors(c, new, ca->dev_idx,
588 -old_m.cached_sectors,
591 bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
595 trace_invalidate(ca, bucket_to_sector(ca, new_u.bucket),
596 old_m.cached_sectors);
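/*
 * checked_add(): add @b to the 16 bit counter @a, saturating at U16_MAX;
 * evaluates to true if the addition overflowed:
 */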
602 #define checked_add(a, b) \
604 unsigned _res = (unsigned) (a) + (b); \
605 bool overflow = _res > U16_MAX; \
612 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
613 size_t b, enum bch_data_type data_type,
614 unsigned sectors, struct gc_pos pos,
618 struct bucket_mark old, new;
621 BUG_ON(!(flags & BTREE_TRIGGER_GC));
622 BUG_ON(data_type != BCH_DATA_sb &&
623 data_type != BCH_DATA_journal);
626 * Backup superblock might be past the end of our normal usable space:
628 if (b >= ca->mi.nbuckets)
631 percpu_down_read(&c->mark_lock);
632 g = gc_bucket(ca, b);
633 old = bucket_cmpxchg(g, new, ({
634 new.data_type = data_type;
635 overflow = checked_add(new.dirty_sectors, sectors);
638 bch2_fs_inconsistent_on(old.data_type &&
639 old.data_type != data_type, c,
640 "different types of data in same bucket: %s, %s",
641 bch2_data_types[old.data_type],
642 bch2_data_types[data_type]);
644 bch2_fs_inconsistent_on(overflow, c,
645 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
646 ca->dev_idx, b, new.gen,
647 bch2_data_types[old.data_type ?: data_type],
648 old.dirty_sectors, sectors);
650 bch2_dev_usage_update(c, ca, old, new, 0, true);
651 percpu_up_read(&c->mark_lock);
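/*
 * Convert extent sectors to sectors actually used on disk, scaling by
 * the compression ratio for compressed extents:
 */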
654 static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
656 EBUG_ON(sectors < 0);
658 return p.crc.compression_type &&
659 p.crc.compression_type != BCH_COMPRESSION_TYPE_incompressible
660 ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
661 p.crc.uncompressed_size)
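/*
 * Validate a pointer against the bucket it points into: checks for stale
 * or too-new generations, conflicting data types, and sector count
 * overflow, emitting fsck errors as appropriate:
 */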
665 static int check_bucket_ref(struct bch_fs *c,
667 const struct bch_extent_ptr *ptr,
668 s64 sectors, enum bch_data_type ptr_data_type,
669 u8 bucket_gen, u8 bucket_data_type,
670 u16 dirty_sectors, u16 cached_sectors)
672 size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
673 u16 bucket_sectors = !ptr->cached
678 if (gen_after(ptr->gen, bucket_gen)) {
679 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
680 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
682 ptr->dev, bucket_nr, bucket_gen,
683 bch2_data_types[bucket_data_type ?: ptr_data_type],
685 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
689 if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
690 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
691 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
693 ptr->dev, bucket_nr, bucket_gen,
694 bch2_data_types[bucket_data_type ?: ptr_data_type],
696 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
700 if (bucket_gen != ptr->gen && !ptr->cached) {
701 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
702 "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
704 ptr->dev, bucket_nr, bucket_gen,
705 bch2_data_types[bucket_data_type ?: ptr_data_type],
707 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
711 if (bucket_gen != ptr->gen)
714 if (bucket_data_type && ptr_data_type &&
715 bucket_data_type != ptr_data_type) {
716 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
717 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
719 ptr->dev, bucket_nr, bucket_gen,
720 bch2_data_types[bucket_data_type],
721 bch2_data_types[ptr_data_type],
722 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
726 if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
727 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
728 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
730 ptr->dev, bucket_nr, bucket_gen,
731 bch2_data_types[bucket_data_type ?: ptr_data_type],
732 bucket_sectors, sectors,
733 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
740 static int mark_stripe_bucket(struct btree_trans *trans,
745 struct bch_fs *c = trans->c;
746 u64 journal_seq = trans->journal_res.seq;
747 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
748 unsigned nr_data = s->nr_blocks - s->nr_redundant;
749 bool parity = ptr_idx >= nr_data;
750 enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
751 s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
752 const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
753 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
755 struct bucket_mark new, old;
759 BUG_ON(!(flags & BTREE_TRIGGER_GC));
761 /* XXX doesn't handle deletion */
763 percpu_down_read(&c->mark_lock);
764 g = PTR_GC_BUCKET(ca, ptr);
766 if (g->mark.dirty_sectors ||
767 (g->stripe && g->stripe != k.k->p.offset)) {
768 bch2_fs_inconsistent(c,
769 "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
770 ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
771 (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
776 old = bucket_cmpxchg(g, new, ({
777 ret = check_bucket_ref(c, k, ptr, sectors, data_type,
778 new.gen, new.data_type,
779 new.dirty_sectors, new.cached_sectors);
783 new.dirty_sectors += sectors;
785 new.data_type = data_type;
790 g->stripe = k.k->p.offset;
791 g->stripe_redundancy = s->nr_redundant;
793 bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
795 percpu_up_read(&c->mark_lock);
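/*
 * Update one bucket's dirty or cached sector count and data type for a
 * single pointer, after validating it with check_bucket_ref():
 */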
800 static int __mark_pointer(struct btree_trans *trans,
802 const struct bch_extent_ptr *ptr,
803 s64 sectors, enum bch_data_type ptr_data_type,
804 u8 bucket_gen, u8 *bucket_data_type,
805 u16 *dirty_sectors, u16 *cached_sectors)
807 u16 *dst_sectors = !ptr->cached
810 int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
811 bucket_gen, *bucket_data_type,
812 *dirty_sectors, *cached_sectors);
817 *dst_sectors += sectors;
818 *bucket_data_type = *dirty_sectors || *cached_sectors
823 static int bch2_mark_pointer(struct btree_trans *trans,
825 struct extent_ptr_decoded p,
826 s64 sectors, enum bch_data_type data_type,
829 u64 journal_seq = trans->journal_res.seq;
830 struct bch_fs *c = trans->c;
831 struct bucket_mark old, new;
832 struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
838 BUG_ON(!(flags & BTREE_TRIGGER_GC));
840 percpu_down_read(&c->mark_lock);
841 g = PTR_GC_BUCKET(ca, &p.ptr);
843 v = atomic64_read(&g->_mark.v);
845 new.v.counter = old.v.counter = v;
846 bucket_data_type = new.data_type;
848 ret = __mark_pointer(trans, k, &p.ptr, sectors,
852 &new.cached_sectors);
856 new.data_type = bucket_data_type;
858 if (flags & BTREE_TRIGGER_NOATOMIC) {
862 } while ((v = atomic64_cmpxchg(&g->_mark.v,
864 new.v.counter)) != old.v.counter);
866 bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
868 percpu_up_read(&c->mark_lock);
873 static int bch2_mark_stripe_ptr(struct btree_trans *trans,
875 struct bch_extent_stripe_ptr p,
876 enum bch_data_type data_type,
880 struct bch_fs *c = trans->c;
881 struct bch_replicas_padded r;
884 BUG_ON(!(flags & BTREE_TRIGGER_GC));
886 m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
888 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
893 spin_lock(&c->ec_stripes_heap_lock);
895 if (!m || !m->alive) {
896 spin_unlock(&c->ec_stripes_heap_lock);
897 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
899 bch2_inconsistent_error(c);
903 m->block_sectors[p.block] += sectors;
906 spin_unlock(&c->ec_stripes_heap_lock);
908 r.e.data_type = data_type;
909 update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
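/*
 * GC-path trigger for extents and btree pointers: mark each pointer's
 * bucket, account cached vs. dirty sectors, and update the replicas
 * entry for the extent:
 */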
914 static int bch2_mark_extent(struct btree_trans *trans,
915 struct bkey_s_c old, struct bkey_s_c new,
918 u64 journal_seq = trans->journal_res.seq;
919 struct bch_fs *c = trans->c;
920 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
921 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
922 const union bch_extent_entry *entry;
923 struct extent_ptr_decoded p;
924 struct bch_replicas_padded r;
925 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
928 s64 sectors = bkey_is_btree_ptr(k.k)
931 s64 dirty_sectors = 0;
935 BUG_ON(!(flags & BTREE_TRIGGER_GC));
937 r.e.data_type = data_type;
941 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
942 s64 disk_sectors = ptr_disk_sectors(sectors, p);
944 if (flags & BTREE_TRIGGER_OVERWRITE)
945 disk_sectors = -disk_sectors;
947 ret = bch2_mark_pointer(trans, k, p, disk_sectors,
956 ret = update_cached_sectors(c, k, p.ptr.dev,
957 disk_sectors, journal_seq, true);
959 bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
963 } else if (!p.has_ec) {
964 dirty_sectors += disk_sectors;
965 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
967 ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
968 disk_sectors, flags);
973 * There may be other dirty pointers in this extent, but
974 * if so they're not required for mounting if we have an
975 * erasure coded pointer in this extent:
982 ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
986 bch2_bkey_val_to_text(&PBUF(buf), c, k);
987 bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
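/*
 * Stripe trigger: keep the in-memory stripe heap / gc_stripes state in
 * sync with the stripe key; under gc, also mark each constituent bucket
 * and account the parity sectors:
 */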
995 static int bch2_mark_stripe(struct btree_trans *trans,
996 struct bkey_s_c old, struct bkey_s_c new,
999 bool gc = flags & BTREE_TRIGGER_GC;
1000 u64 journal_seq = trans->journal_res.seq;
1001 struct bch_fs *c = trans->c;
1002 u64 idx = new.k->p.offset;
1003 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1004 ? bkey_s_c_to_stripe(old).v : NULL;
1005 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1006 ? bkey_s_c_to_stripe(new).v : NULL;
1010 BUG_ON(gc && old_s);
1013 struct stripe *m = genradix_ptr(&c->stripes, idx);
1015 if (!m || (old_s && !m->alive)) {
1016 char buf1[200], buf2[200];
1018 bch2_bkey_val_to_text(&PBUF(buf1), c, old);
1019 bch2_bkey_val_to_text(&PBUF(buf2), c, new);
1020 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
1022 "new %s", idx, buf1, buf2);
1023 bch2_inconsistent_error(c);
1028 spin_lock(&c->ec_stripes_heap_lock);
1029 bch2_stripes_heap_del(c, m, idx);
1030 spin_unlock(&c->ec_stripes_heap_lock);
1032 memset(m, 0, sizeof(*m));
1035 m->sectors = le16_to_cpu(new_s->sectors);
1036 m->algorithm = new_s->algorithm;
1037 m->nr_blocks = new_s->nr_blocks;
1038 m->nr_redundant = new_s->nr_redundant;
1039 m->blocks_nonempty = 0;
1041 for (i = 0; i < new_s->nr_blocks; i++)
1042 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
1044 spin_lock(&c->ec_stripes_heap_lock);
1045 bch2_stripes_heap_update(c, m, idx);
1046 spin_unlock(&c->ec_stripes_heap_lock);
1049 struct gc_stripe *m =
1050 genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
1053 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
1058 * This will be wrong when we bring back runtime gc: we should
1059 * be unmarking the old key and then marking the new key
1062 m->sectors = le16_to_cpu(new_s->sectors);
1063 m->nr_blocks = new_s->nr_blocks;
1064 m->nr_redundant = new_s->nr_redundant;
1066 for (i = 0; i < new_s->nr_blocks; i++)
1067 m->ptrs[i] = new_s->ptrs[i];
1069 bch2_bkey_to_replicas(&m->r.e, new);
1072 * gc recalculates this field from stripe ptr
1075 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1077 for (i = 0; i < new_s->nr_blocks; i++) {
1078 ret = mark_stripe_bucket(trans, new, i, flags);
1083 ret = update_replicas(c, new, &m->r.e,
1084 ((s64) m->sectors * m->nr_redundant),
1089 bch2_bkey_val_to_text(&PBUF(buf), c, new);
1090 bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
1098 static int bch2_mark_inode(struct btree_trans *trans,
1099 struct bkey_s_c old, struct bkey_s_c new,
1102 struct bch_fs *c = trans->c;
1103 struct bch_fs_usage __percpu *fs_usage;
1104 u64 journal_seq = trans->journal_res.seq;
1106 if (flags & BTREE_TRIGGER_INSERT) {
1107 struct bch_inode_v2 *v = (struct bch_inode_v2 *) new.v;
1109 BUG_ON(!journal_seq);
1110 BUG_ON(new.k->type != KEY_TYPE_inode_v2);
1112 v->bi_journal_seq = cpu_to_le64(journal_seq);
1115 if (flags & BTREE_TRIGGER_GC) {
1116 percpu_down_read(&c->mark_lock);
1119 fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
1120 fs_usage->nr_inodes += bkey_is_inode(new.k);
1121 fs_usage->nr_inodes -= bkey_is_inode(old.k);
1124 percpu_up_read(&c->mark_lock);
1129 static int bch2_mark_reservation(struct btree_trans *trans,
1130 struct bkey_s_c old, struct bkey_s_c new,
1133 struct bch_fs *c = trans->c;
1134 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
1135 struct bch_fs_usage __percpu *fs_usage;
1136 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1137 s64 sectors = (s64) k.k->size;
1139 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1141 if (flags & BTREE_TRIGGER_OVERWRITE)
1143 sectors *= replicas;
1145 percpu_down_read(&c->mark_lock);
1148 fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
1149 replicas = clamp_t(unsigned, replicas, 1,
1150 ARRAY_SIZE(fs_usage->persistent_reserved));
1152 fs_usage->reserved += sectors;
1153 fs_usage->persistent_reserved[replicas - 1] += sectors;
1156 percpu_up_read(&c->mark_lock);
1161 static s64 __bch2_mark_reflink_p(struct bch_fs *c, struct bkey_s_c_reflink_p p,
1162 u64 *idx, unsigned flags, size_t r_idx)
1164 struct reflink_gc *r;
1165 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1168 if (r_idx >= c->reflink_gc_nr)
1171 r = genradix_ptr(&c->reflink_gc_table, r_idx);
1172 if (*idx < r->offset - r->size)
1175 BUG_ON((s64) r->refcount + add < 0);
1185 * XXX: we're replacing the entire reflink pointer with an error
1186 * key, we should just be replacing the part that was missing:
1188 if (fsck_err(c, "%llu:%llu len %u points to nonexistent indirect extent %llu",
1189 p.k->p.inode, p.k->p.offset, p.k->size, *idx)) {
1190 struct bkey_i_error new;
1193 new.k.type = KEY_TYPE_error;
1195 new.k.size = p.k->size;
1196 ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new.k_i);
1202 static int bch2_mark_reflink_p(struct btree_trans *trans,
1203 struct bkey_s_c old, struct bkey_s_c new,
1206 struct bch_fs *c = trans->c;
1207 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
1208 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1209 struct reflink_gc *ref;
1211 u64 idx = le64_to_cpu(p.v->idx);
1212 u64 end = le64_to_cpu(p.v->idx) + p.k->size;
1215 BUG_ON(!(flags & BTREE_TRIGGER_GC));
1217 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
1218 idx -= le32_to_cpu(p.v->front_pad);
1219 end += le32_to_cpu(p.v->back_pad);
1223 r = c->reflink_gc_nr;
1225 m = l + (r - l) / 2;
1227 ref = genradix_ptr(&c->reflink_gc_table, m);
1228 if (ref->offset <= idx)
1234 while (idx < end && !ret)
1235 ret = __bch2_mark_reflink_p(c, p, &idx, flags, l++);
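/* Dispatch mark triggers by key type: */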
1240 int bch2_mark_key(struct btree_trans *trans,
1241 struct bkey_s_c old,
1242 struct bkey_s_c new,
1245 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
1247 switch (k.k->type) {
1248 case KEY_TYPE_alloc:
1249 case KEY_TYPE_alloc_v2:
1250 case KEY_TYPE_alloc_v3:
1251 return bch2_mark_alloc(trans, old, new, flags);
1252 case KEY_TYPE_btree_ptr:
1253 case KEY_TYPE_btree_ptr_v2:
1254 case KEY_TYPE_extent:
1255 case KEY_TYPE_reflink_v:
1256 return bch2_mark_extent(trans, old, new, flags);
1257 case KEY_TYPE_stripe:
1258 return bch2_mark_stripe(trans, old, new, flags);
1259 case KEY_TYPE_inode:
1260 case KEY_TYPE_inode_v2:
1261 return bch2_mark_inode(trans, old, new, flags);
1262 case KEY_TYPE_reservation:
1263 return bch2_mark_reservation(trans, old, new, flags);
1264 case KEY_TYPE_reflink_p:
1265 return bch2_mark_reflink_p(trans, old, new, flags);
1266 case KEY_TYPE_snapshot:
1267 return bch2_mark_snapshot(trans, old, new, flags);
1273 int bch2_mark_update(struct btree_trans *trans, struct btree_path *path,
1274 struct bkey_i *new, unsigned flags)
1276 struct bkey _deleted = KEY(0, 0, 0);
1277 struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };
1278 struct bkey_s_c old;
1279 struct bkey unpacked;
1282 _deleted.p = path->pos;
1284 if (unlikely(flags & BTREE_TRIGGER_NORUN))
1287 if (!btree_node_type_needs_gc(path->btree_id))
1290 old = bch2_btree_path_peek_slot(path, &unpacked);
1292 if (old.k->type == new->k.type &&
1293 ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
1294 ret = bch2_mark_key(trans, old, bkey_i_to_s_c(new),
1295 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1297 ret = bch2_mark_key(trans, deleted, bkey_i_to_s_c(new),
1298 BTREE_TRIGGER_INSERT|flags) ?:
1299 bch2_mark_key(trans, old, deleted,
1300 BTREE_TRIGGER_OVERWRITE|flags);
1306 static noinline __cold
1307 void fs_usage_apply_warn(struct btree_trans *trans,
1308 unsigned disk_res_sectors,
1309 s64 should_not_have_added)
1311 struct bch_fs *c = trans->c;
1312 struct btree_insert_entry *i;
1315 bch_err(c, "disk usage increased %lli more than %u sectors reserved",
1316 should_not_have_added, disk_res_sectors);
1318 trans_for_each_update(trans, i) {
1319 pr_err("while inserting");
1320 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1322 pr_err("overlapping with");
1326 struct bkey_s_c k = bch2_btree_path_peek_slot(i->path, &u);
1328 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1331 struct bkey_cached *ck = (void *) i->path->l[0].b;
1334 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
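/*
 * At transaction commit time, apply the accumulated replicas deltas to
 * the filesystem usage counters, checking that we didn't use more
 * sectors than the transaction's disk reservation allowed:
 */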
1342 int bch2_trans_fs_usage_apply(struct btree_trans *trans,
1343 struct replicas_delta_list *deltas)
1345 struct bch_fs *c = trans->c;
1346 static int warned_disk_usage = 0;
1348 unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1349 struct replicas_delta *d = deltas->d, *d2;
1350 struct replicas_delta *top = (void *) deltas->d + deltas->used;
1351 struct bch_fs_usage *dst;
1352 s64 added = 0, should_not_have_added;
1355 percpu_down_read(&c->mark_lock);
1357 dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1359 for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1360 switch (d->r.data_type) {
1361 case BCH_DATA_btree:
1363 case BCH_DATA_parity:
1367 if (__update_replicas(c, dst, &d->r, d->delta))
1371 dst->nr_inodes += deltas->nr_inodes;
1373 for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1374 added += deltas->persistent_reserved[i];
1375 dst->reserved += deltas->persistent_reserved[i];
1376 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
1380 * Not allowed to reduce sectors_available except by getting a
1383 should_not_have_added = added - (s64) disk_res_sectors;
1384 if (unlikely(should_not_have_added > 0)) {
1385 u64 old, new, v = atomic64_read(&c->sectors_available);
1389 new = max_t(s64, 0, old - should_not_have_added);
1390 } while ((v = atomic64_cmpxchg(&c->sectors_available,
1393 added -= should_not_have_added;
1398 trans->disk_res->sectors -= added;
1399 this_cpu_sub(*c->online_reserved, added);
1403 percpu_up_read(&c->mark_lock);
1405 if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1406 fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
1409 /* revert changes: */
1410 for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
1411 BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
1414 percpu_up_read(&c->mark_lock);
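/*
 * Point @iter at the alloc key for the bucket @ptr refers to and unpack
 * it into @u, for a transactional update:
 */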
1420 static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
1421 const struct bch_extent_ptr *ptr,
1422 struct bkey_alloc_unpacked *u)
1424 struct bch_fs *c = trans->c;
1425 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1429 bch2_trans_iter_init(trans, iter, BTREE_ID_alloc,
1430 POS(ptr->dev, PTR_BUCKET_NR(ca, ptr)),
1431 BTREE_ITER_WITH_UPDATES|
1434 k = bch2_btree_iter_peek_slot(iter);
1437 bch2_trans_iter_exit(trans, iter);
1441 *u = bch2_alloc_unpack(k);
1445 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1446 struct bkey_s_c k, struct extent_ptr_decoded p,
1447 s64 sectors, enum bch_data_type data_type)
1449 struct btree_iter iter;
1450 struct bkey_alloc_unpacked u;
1453 ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1457 ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
1458 u.gen, &u.data_type,
1459 &u.dirty_sectors, &u.cached_sectors);
1463 ret = bch2_alloc_write(trans, &iter, &u, 0);
1467 bch2_trans_iter_exit(trans, &iter);
1471 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1472 struct extent_ptr_decoded p,
1473 s64 sectors, enum bch_data_type data_type)
1475 struct bch_fs *c = trans->c;
1476 struct btree_iter iter;
1478 struct bkey_i_stripe *s;
1479 struct bch_replicas_padded r;
1482 bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
1484 BTREE_ITER_WITH_UPDATES);
1485 k = bch2_btree_iter_peek_slot(&iter);
1490 if (k.k->type != KEY_TYPE_stripe) {
1491 bch2_fs_inconsistent(c,
1492 "pointer to nonexistent stripe %llu",
1494 bch2_inconsistent_error(c);
1499 if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1500 bch2_fs_inconsistent(c,
1501 "stripe pointer doesn't match stripe %llu",
1507 s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1508 ret = PTR_ERR_OR_ZERO(s);
1512 bkey_reassemble(&s->k_i, k);
1513 stripe_blockcount_set(&s->v, p.ec.block,
1514 stripe_blockcount_get(&s->v, p.ec.block) +
1517 ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
1521 bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1522 r.e.data_type = data_type;
1523 update_replicas_list(trans, &r.e, sectors);
1525 bch2_trans_iter_exit(trans, &iter);
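/*
 * Transactional counterpart of bch2_mark_extent(): update alloc and
 * stripe keys via btree updates, and queue replicas deltas on the
 * transaction:
 */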
1529 static int bch2_trans_mark_extent(struct btree_trans *trans,
1530 struct bkey_s_c k, unsigned flags)
1532 struct bch_fs *c = trans->c;
1533 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1534 const union bch_extent_entry *entry;
1535 struct extent_ptr_decoded p;
1536 struct bch_replicas_padded r;
1537 enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
1540 s64 sectors = bkey_is_btree_ptr(k.k)
1543 s64 dirty_sectors = 0;
1547 r.e.data_type = data_type;
1549 r.e.nr_required = 1;
1551 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1552 s64 disk_sectors = ptr_disk_sectors(sectors, p);
1554 if (flags & BTREE_TRIGGER_OVERWRITE)
1555 disk_sectors = -disk_sectors;
1557 ret = bch2_trans_mark_pointer(trans, k, p,
1558 disk_sectors, data_type);
1566 update_cached_sectors_list(trans, p.ptr.dev,
1568 } else if (!p.has_ec) {
1569 dirty_sectors += disk_sectors;
1570 r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1572 ret = bch2_trans_mark_stripe_ptr(trans, p,
1573 disk_sectors, data_type);
1577 r.e.nr_required = 0;
1582 update_replicas_list(trans, &r.e, dirty_sectors);
1587 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
1588 struct bkey_s_c_stripe s,
1589 unsigned idx, bool deleting)
1591 struct bch_fs *c = trans->c;
1592 const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1593 struct btree_iter iter;
1594 struct bkey_alloc_unpacked u;
1595 enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
1596 ? BCH_DATA_parity : 0;
1597 s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
1603 ret = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1607 ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
1609 u.dirty_sectors, u.cached_sectors);
1614 if (bch2_fs_inconsistent_on(u.stripe ||
1615 u.stripe_redundancy, c,
1616 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
1617 iter.pos.inode, iter.pos.offset, u.gen,
1618 bch2_data_types[u.data_type],
1620 u.stripe, s.k->p.offset)) {
1625 if (bch2_fs_inconsistent_on(data_type && u.dirty_sectors, c,
1626 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
1627 iter.pos.inode, iter.pos.offset, u.gen,
1628 bch2_data_types[u.data_type],
1635 u.stripe = s.k->p.offset;
1636 u.stripe_redundancy = s.v->nr_redundant;
1638 if (bch2_fs_inconsistent_on(u.stripe != s.k->p.offset ||
1639 u.stripe_redundancy != s.v->nr_redundant, c,
1640 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
1641 iter.pos.inode, iter.pos.offset, u.gen,
1642 s.k->p.offset, u.stripe)) {
1648 u.stripe_redundancy = 0;
1651 u.dirty_sectors += sectors;
1653 u.data_type = !deleting ? data_type : 0;
1655 ret = bch2_alloc_write(trans, &iter, &u, 0);
1659 bch2_trans_iter_exit(trans, &iter);
1663 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1664 struct bkey_s_c old, struct bkey_s_c new,
1667 struct bkey_s_c_stripe old_s = { .k = NULL };
1668 struct bkey_s_c_stripe new_s = { .k = NULL };
1669 struct bch_replicas_padded r;
1670 unsigned i, nr_blocks;
1673 if (old.k->type == KEY_TYPE_stripe)
1674 old_s = bkey_s_c_to_stripe(old);
1675 if (new.k->type == KEY_TYPE_stripe)
1676 new_s = bkey_s_c_to_stripe(new);
1679 * If the pointers aren't changing, we don't need to do anything:
1681 if (new_s.k && old_s.k &&
1682 new_s.v->nr_blocks == old_s.v->nr_blocks &&
1683 new_s.v->nr_redundant == old_s.v->nr_redundant &&
1684 !memcmp(old_s.v->ptrs, new_s.v->ptrs,
1685 new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
1688 BUG_ON(new_s.k && old_s.k &&
1689 (new_s.v->nr_blocks != old_s.v->nr_blocks ||
1690 new_s.v->nr_redundant != old_s.v->nr_redundant));
1692 nr_blocks = new_s.k ? new_s.v->nr_blocks : old_s.v->nr_blocks;
1695 s64 sectors = le16_to_cpu(new_s.v->sectors);
1697 bch2_bkey_to_replicas(&r.e, new);
1698 update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
1702 s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
1704 bch2_bkey_to_replicas(&r.e, old);
1705 update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
1708 for (i = 0; i < nr_blocks; i++) {
1709 if (new_s.k && old_s.k &&
1710 !memcmp(&new_s.v->ptrs[i],
1712 sizeof(new_s.v->ptrs[i])))
1716 ret = bch2_trans_mark_stripe_bucket(trans, new_s, i, false);
1722 ret = bch2_trans_mark_stripe_bucket(trans, old_s, i, true);
1731 static int bch2_trans_mark_inode(struct btree_trans *trans,
1732 struct bkey_s_c old,
1733 struct bkey_s_c new,
1736 int nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
1739 struct replicas_delta_list *d =
1740 replicas_deltas_realloc(trans, 0);
1747 static int bch2_trans_mark_reservation(struct btree_trans *trans,
1748 struct bkey_s_c k, unsigned flags)
1750 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1751 s64 sectors = (s64) k.k->size;
1752 struct replicas_delta_list *d;
1754 if (flags & BTREE_TRIGGER_OVERWRITE)
1756 sectors *= replicas;
1758 d = replicas_deltas_realloc(trans, 0);
1760 replicas = clamp_t(unsigned, replicas, 1,
1761 ARRAY_SIZE(d->persistent_reserved));
1763 d->persistent_reserved[replicas - 1] += sectors;
1767 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1768 struct bkey_s_c_reflink_p p,
1769 u64 *idx, unsigned flags)
1771 struct bch_fs *c = trans->c;
1772 struct btree_iter iter;
1776 int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1780 bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
1782 BTREE_ITER_WITH_UPDATES);
1783 k = bch2_btree_iter_peek_slot(&iter);
1788 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1789 ret = PTR_ERR_OR_ZERO(n);
1793 bkey_reassemble(n, k);
1795 refcount = bkey_refcount(n);
1797 bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
1798 bch2_fs_inconsistent(c,
1799 "nonexistent indirect extent at %llu while marking\n %s",
1805 if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
1806 bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
1807 bch2_fs_inconsistent(c,
1808 "indirect extent refcount underflow at %llu while marking\n %s",
1814 if (flags & BTREE_TRIGGER_INSERT) {
1815 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1818 pad = max_t(s64, le32_to_cpu(v->front_pad),
1819 le64_to_cpu(v->idx) - bkey_start_offset(k.k));
1820 BUG_ON(pad > U32_MAX);
1821 v->front_pad = cpu_to_le32(pad);
1823 pad = max_t(s64, le32_to_cpu(v->back_pad),
1824 k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
1825 BUG_ON(pad > U32_MAX);
1826 v->back_pad = cpu_to_le32(pad);
1829 le64_add_cpu(refcount, add);
1832 n->k.type = KEY_TYPE_deleted;
1833 set_bkey_val_u64s(&n->k, 0);
1836 bch2_btree_iter_set_pos_to_extent_start(&iter);
1837 ret = bch2_trans_update(trans, &iter, n, 0);
1841 *idx = k.k->p.offset;
1843 bch2_trans_iter_exit(trans, &iter);
1847 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1848 struct bkey_s_c k, unsigned flags)
1850 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
1854 if (flags & BTREE_TRIGGER_INSERT) {
1855 struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
1857 v->front_pad = v->back_pad = 0;
1860 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
1861 end_idx = le64_to_cpu(p.v->idx) + p.k->size +
1862 le32_to_cpu(p.v->back_pad);
1864 while (idx < end_idx && !ret)
1865 ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
1870 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
1871 struct bkey_s_c new, unsigned flags)
1873 struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
1875 switch (k.k->type) {
1876 case KEY_TYPE_btree_ptr:
1877 case KEY_TYPE_btree_ptr_v2:
1878 case KEY_TYPE_extent:
1879 case KEY_TYPE_reflink_v:
1880 return bch2_trans_mark_extent(trans, k, flags);
1881 case KEY_TYPE_stripe:
1882 return bch2_trans_mark_stripe(trans, old, new, flags);
1883 case KEY_TYPE_inode:
1884 case KEY_TYPE_inode_v2:
1885 return bch2_trans_mark_inode(trans, old, new, flags);
1886 case KEY_TYPE_reservation:
1887 return bch2_trans_mark_reservation(trans, k, flags);
1888 case KEY_TYPE_reflink_p:
1889 return bch2_trans_mark_reflink_p(trans, k, flags);
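/*
 * Transactionally mark a bucket as containing superblock or journal
 * data, updating its alloc key:
 */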
1895 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1896 struct bch_dev *ca, size_t b,
1897 enum bch_data_type type,
1900 struct bch_fs *c = trans->c;
1901 struct btree_iter iter;
1902 struct bkey_alloc_unpacked u;
1903 struct bch_extent_ptr ptr = {
1905 .offset = bucket_to_sector(ca, b),
1910 * Backup superblock might be past the end of our normal usable space:
1912 if (b >= ca->mi.nbuckets)
1915 ret = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
1919 if (u.data_type && u.data_type != type) {
1920 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1921 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
1923 iter.pos.inode, iter.pos.offset, u.gen,
1924 bch2_data_types[u.data_type],
1925 bch2_data_types[type],
1926 bch2_data_types[type]);
1932 u.dirty_sectors = sectors;
1934 ret = bch2_alloc_write(trans, &iter, &u, 0);
1938 bch2_trans_iter_exit(trans, &iter);
1942 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1943 struct bch_dev *ca, size_t b,
1944 enum bch_data_type type,
1947 return __bch2_trans_do(trans, NULL, NULL, 0,
1948 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1951 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
1954 enum bch_data_type type,
1955 u64 *bucket, unsigned *bucket_sectors)
1958 u64 b = sector_to_bucket(ca, start);
1960 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1962 if (b != *bucket && *bucket_sectors) {
1963 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
1964 type, *bucket_sectors);
1968 *bucket_sectors = 0;
1972 *bucket_sectors += sectors;
1974 } while (start < end);
1979 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
1982 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1984 unsigned i, bucket_sectors = 0;
1987 for (i = 0; i < layout->nr_superblocks; i++) {
1988 u64 offset = le64_to_cpu(layout->sb_offset[i]);
1990 if (offset == BCH_SB_SECTOR) {
1991 ret = bch2_trans_mark_metadata_sectors(trans, ca,
1993 BCH_DATA_sb, &bucket, &bucket_sectors);
1998 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1999 offset + (1 << layout->sb_max_size_bits),
2000 BCH_DATA_sb, &bucket, &bucket_sectors);
2005 if (bucket_sectors) {
2006 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2007 bucket, BCH_DATA_sb, bucket_sectors);
2012 for (i = 0; i < ca->journal.nr; i++) {
2013 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2014 ca->journal.buckets[i],
2015 BCH_DATA_journal, ca->mi.bucket_size);
2023 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
2025 return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
2026 __bch2_trans_mark_dev_sb(&trans, ca));
2029 /* Disk reservations: */
2031 #define SECTORS_CACHE 1024
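/*
 * Take a disk reservation: the fast path takes sectors from this CPU's
 * sectors_available cache (refilled from c->sectors_available); the slow
 * path recomputes what's available under sectors_available_lock:
 */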
2033 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2034 u64 sectors, int flags)
2036 struct bch_fs_pcpu *pcpu;
2038 s64 sectors_available;
2041 percpu_down_read(&c->mark_lock);
2043 pcpu = this_cpu_ptr(c->pcpu);
2045 if (sectors <= pcpu->sectors_available)
2048 v = atomic64_read(&c->sectors_available);
2051 get = min((u64) sectors + SECTORS_CACHE, old);
2053 if (get < sectors) {
2057 } while ((v = atomic64_cmpxchg(&c->sectors_available,
2058 old, old - get)) != old);
2060 pcpu->sectors_available += get;
2063 pcpu->sectors_available -= sectors;
2064 this_cpu_add(*c->online_reserved, sectors);
2065 res->sectors += sectors;
2068 percpu_up_read(&c->mark_lock);
2072 mutex_lock(&c->sectors_available_lock);
2074 percpu_u64_set(&c->pcpu->sectors_available, 0);
2075 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2077 if (sectors <= sectors_available ||
2078 (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2079 atomic64_set(&c->sectors_available,
2080 max_t(s64, 0, sectors_available - sectors));
2081 this_cpu_add(*c->online_reserved, sectors);
2082 res->sectors += sectors;
2085 atomic64_set(&c->sectors_available, sectors_available);
2089 mutex_unlock(&c->sectors_available_lock);
2090 percpu_up_read(&c->mark_lock);
2095 /* Startup/shutdown: */
2097 static void buckets_free_rcu(struct rcu_head *rcu)
2099 struct bucket_array *buckets =
2100 container_of(rcu, struct bucket_array, rcu);
2104 buckets->nbuckets * sizeof(struct bucket));
2107 static void bucket_gens_free_rcu(struct rcu_head *rcu)
2109 struct bucket_gens *buckets =
2110 container_of(rcu, struct bucket_gens, rcu);
2112 kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
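/*
 * Allocate or resize the in-memory bucket array, bucket gens, nouse
 * bitmap and allocator fifos/heap for a device, copying the old contents
 * across when resizing:
 */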
2115 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2117 struct bucket_array *buckets = NULL, *old_buckets = NULL;
2118 struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
2119 unsigned long *buckets_nouse = NULL;
2120 alloc_fifo free[RESERVE_NR];
2121 alloc_fifo free_inc;
2122 alloc_heap alloc_heap;
2124 size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2125 ca->mi.bucket_size / btree_sectors(c));
2126 /* XXX: these should be tunable */
2127 size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
2128 size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 6);
2129 size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
2131 bool resize = ca->buckets[0] != NULL;
2135 memset(&free, 0, sizeof(free));
2136 memset(&free_inc, 0, sizeof(free_inc));
2137 memset(&alloc_heap, 0, sizeof(alloc_heap));
2139 if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
2140 nbuckets * sizeof(struct bucket),
2141 GFP_KERNEL|__GFP_ZERO)) ||
2142 !(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
2143 GFP_KERNEL|__GFP_ZERO)) ||
2144 !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2145 sizeof(unsigned long),
2146 GFP_KERNEL|__GFP_ZERO)) ||
2147 !init_fifo(&free[RESERVE_MOVINGGC],
2148 copygc_reserve, GFP_KERNEL) ||
2149 !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2150 !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
2151 !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2154 buckets->first_bucket = ca->mi.first_bucket;
2155 buckets->nbuckets = nbuckets;
2156 bucket_gens->first_bucket = ca->mi.first_bucket;
2157 bucket_gens->nbuckets = nbuckets;
2159 bch2_copygc_stop(c);
2162 down_write(&c->gc_lock);
2163 down_write(&ca->bucket_lock);
2164 percpu_down_write(&c->mark_lock);
2167 old_buckets = bucket_array(ca);
2168 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
2171 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2175 n * sizeof(struct bucket));
2176 memcpy(bucket_gens->b,
2179 memcpy(buckets_nouse,
2181 BITS_TO_LONGS(n) * sizeof(unsigned long));
2184 rcu_assign_pointer(ca->buckets[0], buckets);
2185 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
2186 buckets = old_buckets;
2187 bucket_gens = old_bucket_gens;
2189 swap(ca->buckets_nouse, buckets_nouse);
2192 percpu_up_write(&c->mark_lock);
2193 up_write(&c->gc_lock);
2196 spin_lock(&c->freelist_lock);
2197 for (i = 0; i < RESERVE_NR; i++) {
2198 fifo_move(&free[i], &ca->free[i]);
2199 swap(ca->free[i], free[i]);
2201 fifo_move(&free_inc, &ca->free_inc);
2202 swap(ca->free_inc, free_inc);
2203 spin_unlock(&c->freelist_lock);
2205 /* with gc lock held, alloc_heap can't be in use: */
2206 swap(ca->alloc_heap, alloc_heap);
2208 nbuckets = ca->mi.nbuckets;
2211 up_write(&ca->bucket_lock);
2215 free_heap(&alloc_heap);
2216 free_fifo(&free_inc);
2217 for (i = 0; i < RESERVE_NR; i++)
2218 free_fifo(&free[i]);
2219 kvpfree(buckets_nouse,
2220 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2222 call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
2224 call_rcu(&buckets->rcu, buckets_free_rcu);
2229 void bch2_dev_buckets_free(struct bch_dev *ca)
2233 free_heap(&ca->alloc_heap);
2234 free_fifo(&ca->free_inc);
2235 for (i = 0; i < RESERVE_NR; i++)
2236 free_fifo(&ca->free[i]);
2237 kvpfree(ca->buckets_nouse,
2238 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2239 kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
2240 sizeof(struct bucket_gens) + ca->mi.nbuckets);
2241 kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2242 sizeof(struct bucket_array) +
2243 ca->mi.nbuckets * sizeof(struct bucket));
2245 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2246 free_percpu(ca->usage[i]);
2247 kfree(ca->usage_base);
2250 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2254 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2255 if (!ca->usage_base)
2258 for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2259 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2264 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);