// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "alloc_background.h"
#include "btree_update.h"
#include "buckets_waiting_for_journal.h"
#include "subvolume.h"

#include <linux/preempt.h>
#include <trace/events/bcachefs.h>

static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
					      enum bch_data_type data_type,
		fs_usage->btree += sectors;
		fs_usage->data += sectors;
		fs_usage->cached += sectors;
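
/*
 * Rebuild the summary fields in c->usage_base (reserved, hidden,
 * btree/data/cached) from the accumulated percpu counters, the per-replicas
 * counters and per-device usage. Takes mark_lock for write.
 */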
void bch2_fs_usage_initialize(struct bch_fs *c)
	struct bch_fs_usage *usage;

	percpu_down_write(&c->mark_lock);
	usage = c->usage_base;

	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->reserved += usage->persistent_reserved[i];

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage dev = bch2_dev_usage_read(ca);

		usage->hidden += (dev.d[BCH_DATA_sb].buckets +
				  dev.d[BCH_DATA_journal].buckets) *

	percpu_up_write(&c->mark_lock);

static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
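
/*
 * Read a consistent snapshot of a device's usage: copy usage_base and fold in
 * the percpu accumulators, retrying under the usage_lock seqcount until the
 * read was not torn by a concurrent writer.
 */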
struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage ret;
	unsigned seq, i, u64s = dev_usage_u64s();

		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
			acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
	percpu_rwsem_assert_held(&c->mark_lock);
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);

u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
	ssize_t offset = v - (u64 *) c->usage_base;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

		seq = read_seqcount_begin(&c->usage_lock);
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
	struct bch_fs_usage_online *ret;
	unsigned seq, i, u64s;

	percpu_down_read(&c->mark_lock);

	ret = kmalloc(sizeof(struct bch_fs_usage_online) +
		      sizeof(u64) * c->replicas.nr, GFP_NOFS);
	if (unlikely(!ret)) {
		percpu_up_read(&c->mark_lock);

	ret->online_reserved = percpu_u64_get(c->online_reserved);

	u64s = fs_usage_u64s(c);
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));
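
/*
 * Fold one journal buffer's worth of percpu usage deltas (c->usage[idx] and
 * each device's ca->usage[idx]) into the base counters, then zero the percpu
 * copies, all under the usage_lock seqcount write side.
 */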
void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
	unsigned i, u64s = fs_usage_u64s(c);

	BUG_ON(idx >= ARRAY_SIZE(c->usage));

	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	for_each_member_device_rcu(ca, c, i, NULL) {
		u64s = dev_usage_u64s();

		acc_u64s_percpu((u64 *) ca->usage_base,
				(u64 __percpu *) ca->usage[idx], u64s);
		percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));

	write_seqcount_end(&c->usage_lock);

void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs_usage_online *fs_usage)
	pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);

	pr_buf(out, "hidden:\t\t\t\t%llu\n",

	pr_buf(out, "data:\t\t\t\t%llu\n",

	pr_buf(out, "cached:\t\t\t\t%llu\n",

	pr_buf(out, "reserved:\t\t\t%llu\n",
	       fs_usage->u.reserved);

	pr_buf(out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->u.nr_inodes);

	pr_buf(out, "online reserved:\t\t%llu\n",
	       fs_usage->online_reserved);

	     i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
		pr_buf(out, "%u replicas:\n", i + 1);
		pr_buf(out, "\treserved:\t\t%llu\n",
		       fs_usage->u.persistent_reserved[i]);

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		bch2_replicas_entry_to_text(out, e);
		pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);

static u64 reserve_factor(u64 r)
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);

u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
	return min(fs_usage->u.hidden +
		   reserve_factor(fs_usage->u.reserved +
				  fs_usage->online_reserved),

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
	struct bch_fs_usage_short ret;

	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->hidden);

	data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
		bch2_fs_usage_read_one(c, &c->usage_base->btree);
	reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used = min(ret.capacity, data + reserve_factor(reserved));
	ret.free = ret.capacity - ret.used;

	ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

static inline int is_unavailable_bucket(struct bch_alloc_v4 a)
	return a.dirty_sectors || a.stripe;

static inline int bucket_sectors_fragmented(struct bch_dev *ca,
					    struct bch_alloc_v4 a)
	return a.dirty_sectors
		? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)

static inline enum bch_data_type bucket_type(struct bch_alloc_v4 a)
	return a.cached_sectors && !a.dirty_sectors

static inline void account_bucket(struct bch_fs_usage *fs_usage,
				  struct bch_dev_usage *dev_usage,
				  enum bch_data_type type,
	if (type == BCH_DATA_sb || type == BCH_DATA_journal)
		fs_usage->hidden += size;

	dev_usage->d[type].buckets += nr;

static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
				  struct bch_alloc_v4 old,
				  struct bch_alloc_v4 new,
				  u64 journal_seq, bool gc)
	struct bch_fs_usage *fs_usage;
	struct bch_dev_usage *u;

	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	u = dev_usage_ptr(ca, journal_seq, gc);

	if (bucket_type(old))
		account_bucket(fs_usage, u, bucket_type(old),
			       -1, -ca->mi.bucket_size);

	if (bucket_type(new))
		account_bucket(fs_usage, u, bucket_type(new),
			       1, ca->mi.bucket_size);

	u->buckets_ec += (int) new.stripe - (int) old.stripe;
	u->buckets_unavailable +=
		is_unavailable_bucket(new) - is_unavailable_bucket(old);

	u->d[old.data_type].sectors -= old.dirty_sectors;
	u->d[new.data_type].sectors += new.dirty_sectors;
	u->d[BCH_DATA_cached].sectors +=
		(int) new.cached_sectors - (int) old.cached_sectors;

	u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
	u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);

static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
				    struct bucket old, struct bucket new,
				    u64 journal_seq, bool gc)
	struct bch_alloc_v4 old_a = {
		.data_type = old.data_type,
		.dirty_sectors = old.dirty_sectors,
		.cached_sectors = old.cached_sectors,
		.stripe = old.stripe,
	struct bch_alloc_v4 new_a = {
		.data_type = new.data_type,
		.dirty_sectors = new.dirty_sectors,
		.cached_sectors = new.cached_sectors,
		.stripe = new.stripe,

	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);

static inline int __update_replicas(struct bch_fs *c,
				    struct bch_fs_usage *fs_usage,
				    struct bch_replicas_entry *r,
	int idx = bch2_replicas_entry_idx(c, r);

	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx] += sectors;

static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
				  struct bch_replicas_entry *r, s64 sectors,
				  unsigned journal_seq, bool gc)
	struct bch_fs_usage __percpu *fs_usage;
	struct printbuf buf = PRINTBUF;

	percpu_down_read(&c->mark_lock);

	idx = bch2_replicas_entry_idx(c, r);
	    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
	     fsck_err(c, "no replicas entry\n"
		      (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
		percpu_up_read(&c->mark_lock);
		ret = bch2_mark_replicas(c, r);
		percpu_down_read(&c->mark_lock);

		idx = bch2_replicas_entry_idx(c, r);

	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx] += sectors;

	percpu_up_read(&c->mark_lock);

static inline int update_cached_sectors(struct bch_fs *c,
					unsigned dev, s64 sectors,
					unsigned journal_seq, bool gc)
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
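
/*
 * Grow (or initially allocate) the transaction's replicas delta list so it
 * can hold at least @more additional bytes; if krealloc fails, fall back to a
 * REPLICAS_DELTA_LIST_MAX sized allocation from the replicas_delta_pool
 * mempool and copy the old list over.
 */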
static struct replicas_delta_list *
replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	unsigned new_size = d ? (d->size + more) * 2 : 128;
	unsigned alloc_size = sizeof(*d) + new_size;

	WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);

	if (!d || d->used + more > d->size) {
		d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);

		BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);

			d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
			memset(d, 0, REPLICAS_DELTA_LIST_MAX);

			if (trans->fs_usage_deltas)
				memcpy(d, trans->fs_usage_deltas,
				       trans->fs_usage_deltas->size + sizeof(*d));

			new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);

			kfree(trans->fs_usage_deltas);

		trans->fs_usage_deltas = d;

static inline void update_replicas_list(struct btree_trans *trans,
					struct bch_replicas_entry *r,
	struct replicas_delta_list *d;
	struct replicas_delta *n;

	b = replicas_entry_bytes(r) + 8;
	d = replicas_deltas_realloc(trans, b);

	n = (void *) d->d + d->used;
	memcpy(&n->r, r, replicas_entry_bytes(r));
	bch2_replicas_entry_sort(&n->r);

static inline void update_cached_sectors_list(struct btree_trans *trans,
					      unsigned dev, s64 sectors)
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas_list(trans, &r.e, sectors);
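
/*
 * Trigger for alloc keys: propagate the old/new bucket state into device and
 * filesystem usage, track buckets that must wait for a journal flush before
 * they can be reused, and kick off invalidate/gc work as needed.
 */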
int bch2_mark_alloc(struct btree_trans *trans,
		    struct bkey_s_c old, struct bkey_s_c new,
	bool gc = flags & BTREE_TRIGGER_GC;
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a, new_a;
	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);

	if (bch2_trans_inconsistent_on(new.k->p.offset < ca->mi.first_bucket ||
				       new.k->p.offset >= ca->mi.nbuckets, trans,
				       "alloc key outside range of device's buckets"))

	 * alloc btree is read in by bch2_alloc_read, not gc:
	if ((flags & BTREE_TRIGGER_GC) &&
	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))

	bch2_alloc_to_v4(old, &old_a);
	bch2_alloc_to_v4(new, &new_a);

	if ((flags & BTREE_TRIGGER_INSERT) &&
	    !old_a.data_type != !new_a.data_type &&
	    new.k->type == KEY_TYPE_alloc_v4) {
		struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;

		BUG_ON(!journal_seq);

		 * If the btree updates referring to a bucket weren't flushed
		 * before the bucket became empty again, then we don't have
		 * to wait on a journal flush before we can reuse the bucket:
		new_a.journal_seq = !new_a.data_type &&
			(journal_seq == v->journal_seq ||
			 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
		v->journal_seq = new_a.journal_seq;

	if (old_a.data_type && !new_a.data_type && new_a.journal_seq) {
		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
				c->journal.flushed_seq_ondisk,
				new.k->p.inode, new.k->p.offset,
			bch2_fs_fatal_error(c,
				"error setting bucket_needs_journal_commit: %i", ret);

	if (!new_a.data_type &&
	    (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
		closure_wake_up(&c->freelist_wait);

	if ((flags & BTREE_TRIGGER_INSERT) &&
	    BCH_ALLOC_V4_NEED_DISCARD(&new_a) &&

	if (!old_a.data_type &&
	    should_invalidate_buckets(ca))
		bch2_do_invalidates(c);

	if (bucket_state(new_a) == BUCKET_need_gc_gens) {
		atomic_inc(&c->kick_gc);
		wake_up_process(c->gc_thread);

	percpu_down_read(&c->mark_lock);
	if (!gc && new_a.gen != old_a.gen)
		*bucket_gen(ca, new.k->p.offset) = new_a.gen;

	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);

		struct bucket *g = gc_bucket(ca, new.k->p.offset);

		g->data_type = new_a.data_type;
		g->stripe = new_a.stripe;
		g->stripe_redundancy = new_a.stripe_redundancy;
		g->dirty_sectors = new_a.dirty_sectors;
		g->cached_sectors = new_a.cached_sectors;

	percpu_up_read(&c->mark_lock);

	 * need to know if we're getting called from the invalidate path or
	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
	    old_a.cached_sectors) {
		ret = update_cached_sectors(c, new, ca->dev_idx,
					    -old_a.cached_sectors,
			bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");

		trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
				 old_a.cached_sectors);
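
/*
 * Mark a superblock or journal bucket during gc: bump the gc bucket's
 * dirty_sectors and set its data_type, flagging conflicting data types and
 * sector count overflow, then update device usage.
 */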
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			       size_t b, enum bch_data_type data_type,
			       unsigned sectors, struct gc_pos pos,
	struct bucket old, new, *g;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));
	BUG_ON(data_type != BCH_DATA_sb &&
	       data_type != BCH_DATA_journal);

	 * Backup superblock might be past the end of our normal usable space:
	if (b >= ca->mi.nbuckets)

	percpu_down_read(&c->mark_lock);
	g = gc_bucket(ca, b);

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	overflow = g->dirty_sectors < sectors;

	bch2_fs_inconsistent_on(old.data_type &&
				old.data_type != data_type, c,
		"different types of data in same bucket: %s, %s",
		bch2_data_types[old.data_type],
		bch2_data_types[data_type]);

	bch2_fs_inconsistent_on(overflow, c,
		"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U32_MAX",
		ca->dev_idx, b, new.gen,
		bch2_data_types[old.data_type ?: data_type],
		old.dirty_sectors, sectors);

	bch2_dev_usage_update_m(c, ca, old, new, 0, true);
	percpu_up_read(&c->mark_lock);

static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
	EBUG_ON(sectors < 0);

	return p.crc.compression_type &&
		p.crc.compression_type != BCH_COMPRESSION_TYPE_incompressible
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
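
/*
 * Validate a pointer against the bucket it points into: the pointer's gen
 * must not be newer than the bucket's, stale dirty pointers and mixed data
 * types are reported as fsck errors, and the new sector count must not
 * overflow.
 */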
static int check_bucket_ref(struct bch_fs *c,
			    const struct bch_extent_ptr *ptr,
			    s64 sectors, enum bch_data_type ptr_data_type,
			    u8 b_gen, u8 bucket_data_type,
			    u32 dirty_sectors, u32 cached_sectors)
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	u16 bucket_sectors = !ptr->cached
	struct printbuf buf = PRINTBUF;

	if (gen_after(ptr->gen, b_gen)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf));

	if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));

	if (b_gen != ptr->gen && !ptr->cached) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
			ptr->dev, bucket_nr, b_gen,
			*bucket_gen(ca, bucket_nr),
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));

	if (b_gen != ptr->gen) {

	if (bucket_data_type && ptr_data_type &&
	    bucket_data_type != ptr_data_type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type],
			bch2_data_types[ptr_data_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));

	if ((unsigned) (bucket_sectors + sectors) > U32_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
			ptr->dev, bucket_nr, b_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			bucket_sectors, sectors,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf));

static int mark_stripe_bucket(struct btree_trans *trans,
	struct bch_fs *c = trans->c;
	u64 journal_seq = trans->journal_res.seq;
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned nr_data = s->nr_blocks - s->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
	s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	struct bucket old, new, *g;
	struct printbuf buf = PRINTBUF;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	/* XXX: doesn't handle deletion */

	percpu_down_read(&c->mark_lock);
	g = PTR_GC_BUCKET(ca, ptr);

	if (g->dirty_sectors ||
	    (g->stripe && g->stripe != k.k->p.offset)) {
		bch2_fs_inconsistent(c,
			"bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
			ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf));

	ret = check_bucket_ref(c, k, ptr, sectors, data_type,
			       new.gen, new.data_type,
			       new.dirty_sectors, new.cached_sectors);

	new.dirty_sectors += sectors;
	new.data_type = data_type;

	g->stripe = k.k->p.offset;
	g->stripe_redundancy = s->nr_redundant;

	bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);

	percpu_up_read(&c->mark_lock);

static int __mark_pointer(struct btree_trans *trans,
			  const struct bch_extent_ptr *ptr,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 bucket_gen, u8 *bucket_data_type,
			  u32 *dirty_sectors, u32 *cached_sectors)
	u32 *dst_sectors = !ptr->cached
	int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
				   bucket_gen, *bucket_data_type,
				   *dirty_sectors, *cached_sectors);

	*dst_sectors += sectors;
	*bucket_data_type = *dirty_sectors || *cached_sectors

static int bch2_mark_pointer(struct btree_trans *trans,
			     struct extent_ptr_decoded p,
			     s64 sectors, enum bch_data_type data_type,
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct bucket old, new, *g;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	percpu_down_read(&c->mark_lock);
	g = PTR_GC_BUCKET(ca, &p.ptr);

	bucket_data_type = g->data_type;
	ret = __mark_pointer(trans, k, &p.ptr, sectors,

	g->data_type = bucket_data_type;

	bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);

	percpu_up_read(&c->mark_lock);

static int bch2_mark_stripe_ptr(struct btree_trans *trans,
				struct bch_extent_stripe_ptr p,
				enum bch_data_type data_type,
	struct bch_fs *c = trans->c;
	struct bch_replicas_padded r;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
		bch_err(c, "error allocating memory for gc_stripes, idx %llu",

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || !m->alive) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
		bch2_inconsistent_error(c);

	m->block_sectors[p.block] += sectors;

	spin_unlock(&c->ec_stripes_heap_lock);

	r.e.data_type = data_type;
	update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
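
/*
 * gc trigger for extents and btree pointers: walk each pointer, mark the
 * bucket it lives in, account cached sectors separately, and charge the
 * dirty sectors against the extent's replicas entry.
 */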
int bch2_mark_extent(struct btree_trans *trans,
		     struct bkey_s_c old, struct bkey_s_c new,
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
	s64 sectors = bkey_is_btree_ptr(k.k)
	s64 dirty_sectors = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	r.e.data_type = data_type;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		ret = bch2_mark_pointer(trans, k, p, disk_sectors,

			ret = update_cached_sectors(c, k, p.ptr.dev,
						    disk_sectors, journal_seq, true);
				bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");

		} else if (!p.has_ec) {
			dirty_sectors += disk_sectors;
			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
			ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
						   disk_sectors, flags);

			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			r.e.nr_required = 0;

	ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
		printbuf_exit(&buf);

int bch2_mark_stripe(struct btree_trans *trans,
		     struct bkey_s_c old, struct bkey_s_c new,
	bool gc = flags & BTREE_TRIGGER_GC;
	u64 journal_seq = trans->journal_res.seq;
	struct bch_fs *c = trans->c;
	u64 idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;

	BUG_ON(gc && old_s);

		struct stripe *m = genradix_ptr(&c->stripes, idx);

		if (!m || (old_s && !m->alive)) {
			struct printbuf buf1 = PRINTBUF;
			struct printbuf buf2 = PRINTBUF;

			bch2_bkey_val_to_text(&buf1, c, old);
			bch2_bkey_val_to_text(&buf2, c, new);
			bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
					    "new %s", idx, buf1.buf, buf2.buf);
			printbuf_exit(&buf2);
			printbuf_exit(&buf1);
			bch2_inconsistent_error(c);

			spin_lock(&c->ec_stripes_heap_lock);
			bch2_stripes_heap_del(c, m, idx);
			spin_unlock(&c->ec_stripes_heap_lock);

			memset(m, 0, sizeof(*m));

			m->sectors = le16_to_cpu(new_s->sectors);
			m->algorithm = new_s->algorithm;
			m->nr_blocks = new_s->nr_blocks;
			m->nr_redundant = new_s->nr_redundant;
			m->blocks_nonempty = 0;

			for (i = 0; i < new_s->nr_blocks; i++)
				m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);

			spin_lock(&c->ec_stripes_heap_lock);
			bch2_stripes_heap_update(c, m, idx);
			spin_unlock(&c->ec_stripes_heap_lock);

		struct gc_stripe *m =
			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);

			bch_err(c, "error allocating memory for gc_stripes, idx %llu",

		 * This will be wrong when we bring back runtime gc: we should
		 * be unmarking the old key and then marking the new key
		m->sectors = le16_to_cpu(new_s->sectors);
		m->nr_blocks = new_s->nr_blocks;
		m->nr_redundant = new_s->nr_redundant;

		for (i = 0; i < new_s->nr_blocks; i++)
			m->ptrs[i] = new_s->ptrs[i];

		bch2_bkey_to_replicas(&m->r.e, new);

		 * gc recalculates this field from stripe ptr
		memset(m->block_sectors, 0, sizeof(m->block_sectors));

		for (i = 0; i < new_s->nr_blocks; i++) {
			ret = mark_stripe_bucket(trans, new, i, flags);

		ret = update_replicas(c, new, &m->r.e,
				      ((s64) m->sectors * m->nr_redundant),
			struct printbuf buf = PRINTBUF;

			bch2_bkey_val_to_text(&buf, c, new);
			bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
			printbuf_exit(&buf);

int bch2_mark_inode(struct btree_trans *trans,
		    struct bkey_s_c old, struct bkey_s_c new,
	struct bch_fs *c = trans->c;
	struct bch_fs_usage __percpu *fs_usage;
	u64 journal_seq = trans->journal_res.seq;

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_inode_v2 *v = (struct bch_inode_v2 *) new.v;

		BUG_ON(!journal_seq);
		BUG_ON(new.k->type != KEY_TYPE_inode_v2);

		v->bi_journal_seq = cpu_to_le64(journal_seq);

	if (flags & BTREE_TRIGGER_GC) {
		percpu_down_read(&c->mark_lock);

		fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
		fs_usage->nr_inodes += bkey_is_inode(new.k);
		fs_usage->nr_inodes -= bkey_is_inode(old.k);

		percpu_up_read(&c->mark_lock);

int bch2_mark_reservation(struct btree_trans *trans,
			  struct bkey_s_c old, struct bkey_s_c new,
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
	struct bch_fs_usage __percpu *fs_usage;
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	if (flags & BTREE_TRIGGER_OVERWRITE)
	sectors *= replicas;

	percpu_down_read(&c->mark_lock);

	fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(fs_usage->persistent_reserved));

	fs_usage->reserved += sectors;
	fs_usage->persistent_reserved[replicas - 1] += sectors;

	percpu_up_read(&c->mark_lock);

static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
				 struct bkey_s_c_reflink_p p,
				 u64 *idx, unsigned flags, size_t r_idx)
	struct bch_fs *c = trans->c;
	struct reflink_gc *r;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	struct printbuf buf = PRINTBUF;

	if (r_idx >= c->reflink_gc_nr)

	r = genradix_ptr(&c->reflink_gc_table, r_idx);
	next_idx = min(next_idx, r->offset - r->size);
	if (*idx < next_idx)

	BUG_ON((s64) r->refcount + add < 0);

	if (fsck_err(c, "pointer to missing indirect extent\n"
		     "  missing range %llu-%llu",
		     (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
		struct bkey_i_error new;

		new.k.type = KEY_TYPE_error;
		new.k.p = bkey_start_pos(p.k);
		new.k.p.offset += *idx - start;
		bch2_key_resize(&new.k, next_idx - *idx);
		ret = __bch2_btree_insert(trans, BTREE_ID_extents, &new.k_i);

	printbuf_exit(&buf);

int bch2_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c old, struct bkey_s_c new,
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	struct reflink_gc *ref;
	u64 idx = le64_to_cpu(p.v->idx), start = idx;
	u64 end = le64_to_cpu(p.v->idx) + p.k->size;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
		idx -= le32_to_cpu(p.v->front_pad);
		end += le32_to_cpu(p.v->back_pad);

	r = c->reflink_gc_nr;
		m = l + (r - l) / 2;

		ref = genradix_ptr(&c->reflink_gc_table, m);
		if (ref->offset <= idx)

	while (idx < end && !ret)
		ret = __bch2_mark_reflink_p(trans, p, start, end,

static noinline __cold
void fs_usage_apply_warn(struct btree_trans *trans,
			 unsigned disk_res_sectors,
			 s64 should_not_have_added)
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct printbuf buf = PRINTBUF;

	bch_err(c, "disk usage increased %lli more than %u sectors reserved",
		should_not_have_added, disk_res_sectors);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		pr_err("while inserting");
		printbuf_reset(&buf);
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
		pr_err(" %s", buf.buf);
		pr_err("overlapping with");
		printbuf_reset(&buf);
		bch2_bkey_val_to_text(&buf, c, old);
		pr_err(" %s", buf.buf);

	printbuf_exit(&buf);
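
/*
 * Apply a transaction's accumulated replicas deltas to the in-memory usage
 * counters at commit time; if the transaction consumed more sectors than its
 * disk reservation covered, clamp sectors_available and warn once.
 */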
int bch2_trans_fs_usage_apply(struct btree_trans *trans,
			      struct replicas_delta_list *deltas)
	struct bch_fs *c = trans->c;
	static int warned_disk_usage = 0;
	unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	struct replicas_delta *d = deltas->d, *d2;
	struct replicas_delta *top = (void *) deltas->d + deltas->used;
	struct bch_fs_usage *dst;
	s64 added = 0, should_not_have_added;

	percpu_down_read(&c->mark_lock);

	dst = fs_usage_ptr(c, trans->journal_res.seq, false);

	for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
		switch (d->r.data_type) {
		case BCH_DATA_btree:
		case BCH_DATA_parity:

		if (__update_replicas(c, dst, &d->r, d->delta))

	dst->nr_inodes += deltas->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		added += deltas->persistent_reserved[i];
		dst->reserved += deltas->persistent_reserved[i];
		dst->persistent_reserved[i] += deltas->persistent_reserved[i];

	 * Not allowed to reduce sectors_available except by getting a
	should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new, v = atomic64_read(&c->sectors_available);

			new = max_t(s64, 0, old - should_not_have_added);
		} while ((v = atomic64_cmpxchg(&c->sectors_available,

		added -= should_not_have_added;

		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);

	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);

	/* revert changes: */
	for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
		BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));

	percpu_up_read(&c->mark_lock);

static int bch2_trans_mark_pointer(struct btree_trans *trans,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;

	a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(trans->c, &p.ptr));

	ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
			     a->v.gen, &a->v.data_type,
			     &a->v.dirty_sectors, &a->v.cached_sectors);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);

	bch2_trans_iter_exit(trans, &iter);

static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
			struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
	struct btree_iter iter;
	struct bkey_i_stripe *s;
	struct bch_replicas_padded r;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&iter);

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_trans_inconsistent(trans,
			"pointer to nonexistent stripe %llu",

	if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
		bch2_trans_inconsistent(trans,
			"stripe pointer doesn't match stripe %llu",

	s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(s);

	bkey_reassemble(&s->k_i, k);
	stripe_blockcount_set(&s->v, p.ec.block,
		stripe_blockcount_get(&s->v, p.ec.block) +

	ret = bch2_trans_update(trans, &iter, &s->k_i, 0);

	bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
	r.e.data_type = data_type;
	update_replicas_list(trans, &r.e, sectors);

	bch2_trans_iter_exit(trans, &iter);

int bch2_trans_mark_extent(struct btree_trans *trans,
			   struct bkey_s_c old, struct bkey_i *new,
	struct bch_fs *c = trans->c;
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		: bkey_i_to_s_c(new);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
	s64 sectors = bkey_is_btree_ptr(k.k)
	s64 dirty_sectors = 0;

	r.e.data_type = data_type;
	r.e.nr_required = 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		ret = bch2_trans_mark_pointer(trans, k, p,
					      disk_sectors, data_type);

			update_cached_sectors_list(trans, p.ptr.dev,
		} else if (!p.has_ec) {
			dirty_sectors += disk_sectors;
			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
			ret = bch2_trans_mark_stripe_ptr(trans, p,
					disk_sectors, data_type);

			r.e.nr_required = 0;

	update_replicas_list(trans, &r.e, dirty_sectors);

static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
					 struct bkey_s_c_stripe s,
					 unsigned idx, bool deleting)
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
		? BCH_DATA_parity : 0;
	s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;

	a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));

	ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
			       a->v.gen, a->v.data_type,
			       a->v.dirty_sectors, a->v.cached_sectors);

		if (bch2_trans_inconsistent_on(a->v.stripe ||
					       a->v.stripe_redundancy, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_types[a->v.data_type],
				a->v.stripe, s.k->p.offset)) {

		if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				bch2_data_types[a->v.data_type],

		a->v.stripe = s.k->p.offset;
		a->v.stripe_redundancy = s.v->nr_redundant;

		if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
					       a->v.stripe_redundancy != s.v->nr_redundant, trans,
				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
				iter.pos.inode, iter.pos.offset, a->v.gen,
				s.k->p.offset, a->v.stripe)) {

		a->v.stripe_redundancy = 0;

	a->v.dirty_sectors += sectors;
		a->v.data_type = !deleting ? data_type : 0;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);

	bch2_trans_iter_exit(trans, &iter);

int bch2_trans_mark_stripe(struct btree_trans *trans,
			   struct bkey_s_c old, struct bkey_i *new,
	const struct bch_stripe *old_s = NULL;
	struct bch_stripe *new_s = NULL;
	struct bch_replicas_padded r;
	unsigned i, nr_blocks;

	if (old.k->type == KEY_TYPE_stripe)
		old_s = bkey_s_c_to_stripe(old).v;
	if (new->k.type == KEY_TYPE_stripe)
		new_s = &bkey_i_to_stripe(new)->v;

	 * If the pointers aren't changing, we don't need to do anything:
	if (new_s && old_s &&
	    new_s->nr_blocks == old_s->nr_blocks &&
	    new_s->nr_redundant == old_s->nr_redundant &&
	    !memcmp(old_s->ptrs, new_s->ptrs,
		    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))

	BUG_ON(new_s && old_s &&
	       (new_s->nr_blocks != old_s->nr_blocks ||
		new_s->nr_redundant != old_s->nr_redundant));

	nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;

		s64 sectors = le16_to_cpu(new_s->sectors);

		bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
		update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);

		s64 sectors = -((s64) le16_to_cpu(old_s->sectors));

		bch2_bkey_to_replicas(&r.e, old);
		update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);

	for (i = 0; i < nr_blocks; i++) {
		if (new_s && old_s &&
		    !memcmp(&new_s->ptrs[i],
			    sizeof(new_s->ptrs[i])))

			ret = bch2_trans_mark_stripe_bucket(trans,
					bkey_i_to_s_c_stripe(new), i, false);

			ret = bch2_trans_mark_stripe_bucket(trans,
					bkey_s_c_to_stripe(old), i, true);

int bch2_trans_mark_inode(struct btree_trans *trans,
			  struct bkey_s_c old,
	int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);

		struct replicas_delta_list *d =
			replicas_deltas_realloc(trans, 0);

int bch2_trans_mark_reservation(struct btree_trans *trans,
				struct bkey_s_c old,
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		: bkey_i_to_s_c(new);
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;
	struct replicas_delta_list *d;

	if (flags & BTREE_TRIGGER_OVERWRITE)
	sectors *= replicas;

	d = replicas_deltas_realloc(trans, 0);

	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(d->persistent_reserved));

	d->persistent_reserved[replicas - 1] += sectors;

static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 *idx, unsigned flags)
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	struct printbuf buf = PRINTBUF;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&iter);

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(n);

	bkey_reassemble(n, k);

	refcount = bkey_refcount(n);
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"nonexistent indirect extent at %llu while marking\n  %s",

	if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"indirect extent refcount underflow at %llu while marking\n  %s",

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;

		pad = max_t(s64, le32_to_cpu(v->front_pad),
			    le64_to_cpu(v->idx) - bkey_start_offset(k.k));
		BUG_ON(pad > U32_MAX);
		v->front_pad = cpu_to_le32(pad);

		pad = max_t(s64, le32_to_cpu(v->back_pad),
			    k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
		BUG_ON(pad > U32_MAX);
		v->back_pad = cpu_to_le32(pad);

	le64_add_cpu(refcount, add);

	bch2_btree_iter_set_pos_to_extent_start(&iter);
	ret = bch2_trans_update(trans, &iter, n, 0);

	*idx = k.k->p.offset;

	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);

int bch2_trans_mark_reflink_p(struct btree_trans *trans,
			      struct bkey_s_c old,
	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
		: bkey_i_to_s_c(new);
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;

		v->front_pad = v->back_pad = 0;

	idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
	end_idx = le64_to_cpu(p.v->idx) + p.k->size +
		le32_to_cpu(p.v->back_pad);

	while (idx < end_idx && !ret)
		ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
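
/*
 * Transactional counterpart of bch2_mark_metadata_bucket(): update the
 * bucket's alloc key to record superblock/journal data, flagging buckets that
 * already contain a different data type.
 */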
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
					     struct bch_dev *ca, size_t b,
					     enum bch_data_type type,
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;

	 * Backup superblock might be past the end of our normal usable space:
	if (b >= ca->mi.nbuckets)

	a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));

	if (a->v.data_type && a->v.data_type != type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			iter.pos.inode, iter.pos.offset, a->v.gen,
			bch2_data_types[a->v.data_type],
			bch2_data_types[type],
			bch2_data_types[type]);

	a->v.data_type = type;
	a->v.dirty_sectors = sectors;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);

	bch2_trans_iter_exit(trans, &iter);

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, size_t b,
				    enum bch_data_type type,
	return __bch2_trans_do(trans, NULL, NULL, 0,
			__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    enum bch_data_type type,
					    u64 *bucket, unsigned *bucket_sectors)
		u64 b = sector_to_bucket(ca, start);
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors);

			*bucket_sectors = 0;

		*bucket_sectors += sectors;
	} while (start < end);

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	unsigned i, bucket_sectors = 0;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						BCH_DATA_sb, &bucket, &bucket_sectors);

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				offset + (1 << layout->sb_max_size_bits),
				BCH_DATA_sb, &bucket, &bucket_sectors);

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors);

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size);

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
	return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
			__bch2_trans_mark_dev_sb(&trans, ca));

/* Disk reservations: */

#define SECTORS_CACHE 1024
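
/*
 * Take a disk space reservation: the fast path grabs from this CPU's cached
 * sectors_available; on failure, refill the cache from the global counter, or
 * fall back to recomputing free space under sectors_available_lock.
 */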
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
			      u64 sectors, int flags)
	struct bch_fs_pcpu *pcpu;
	s64 sectors_available;

	percpu_down_read(&c->mark_lock);
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)

	v = atomic64_read(&c->sectors_available);
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available += get;

	pcpu->sectors_available -= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;

	percpu_up_read(&c->mark_lock);

	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors += sectors;

		atomic64_set(&c->sectors_available, sectors_available);

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

/* Startup/shutdown: */

static void bucket_gens_free_rcu(struct rcu_head *rcu)
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
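
/*
 * Resize a device's bucket_gens array (and optional buckets_nouse bitmap):
 * allocate new arrays, copy the old contents under the relevant locks,
 * publish the new bucket_gens via RCU, and free the old copy after a grace
 * period.
 */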
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	unsigned long *buckets_nouse = NULL;
	bool resize = ca->bucket_gens != NULL;

	if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
				      GFP_KERNEL|__GFP_ZERO)) ||
	    (c->opts.buckets_nouse &&
	     !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
					 sizeof(unsigned long),
					 GFP_KERNEL|__GFP_ZERO))))

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets = nbuckets;

	bch2_copygc_stop(c);

		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

		size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);

		memcpy(bucket_gens->b,

			memcpy(buckets_nouse,
			       BITS_TO_LONGS(n) * sizeof(unsigned long));

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens = old_bucket_gens;

	swap(ca->buckets_nouse, buckets_nouse);

	nbuckets = ca->mi.nbuckets;

		percpu_up_write(&c->mark_lock);
		up_write(&ca->bucket_lock);
		up_write(&c->gc_lock);

	kvpfree(buckets_nouse,
		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

void bch2_dev_buckets_free(struct bch_dev *ca)
	kvpfree(ca->buckets_nouse,
		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
	kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
		sizeof(struct bucket_gens) + ca->mi.nbuckets);

	for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
		free_percpu(ca->usage[i]);
	kfree(ca->usage_base);

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
	if (!ca->usage_base)

	for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
		ca->usage[i] = alloc_percpu(struct bch_dev_usage);

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);