// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "ec.h"
#include "error.h"
#include "movinggc.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"

#include <linux/preempt.h>
#include <trace/events/bcachefs.h>
static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
					      enum bch_data_type data_type,
					      s64 sectors)
{
	switch (data_type) {
	case BCH_DATA_btree:
		fs_usage->btree		+= sectors;
		break;
	case BCH_DATA_user:
	case BCH_DATA_parity:
		fs_usage->data		+= sectors;
		break;
	case BCH_DATA_cached:
		fs_usage->cached	+= sectors;
		break;
	default:
		break;
	}
}
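/*
 * Illustrative sketch (not part of the original file; helper name is
 * hypothetical): how the different data types roll up into the base counters
 * above.
 */
#if 0
static void fs_usage_rollup_example(struct bch_fs_usage *u)
{
	fs_usage_data_type_to_base(u, BCH_DATA_btree,	128);	/* -> u->btree */
	fs_usage_data_type_to_base(u, BCH_DATA_user,	1024);	/* -> u->data */
	fs_usage_data_type_to_base(u, BCH_DATA_parity,	256);	/* -> u->data */
	fs_usage_data_type_to_base(u, BCH_DATA_cached,	512);	/* -> u->cached */
}
#endif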
/*
 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
 * wraparound:
 */
void bch2_bucket_seq_cleanup(struct bch_fs *c)
{
	u64 journal_seq = atomic64_read(&c->journal.seq);
	u16 last_seq_ondisk = c->journal.last_seq_ondisk;
	struct bch_dev *ca;
	struct bucket_array *buckets;
	struct bucket *g;
	struct bucket_mark m;
	unsigned i;

	if (journal_seq - c->last_bucket_seq_cleanup <
	    (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
		return;

	c->last_bucket_seq_cleanup = journal_seq;

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets) {
			bucket_cmpxchg(g, m, ({
				if (!m.journal_seq_valid ||
				    bucket_needs_journal_commit(m, last_seq_ondisk))
					break;

				m.journal_seq_valid = 0;
			}));
		}
		up_read(&ca->bucket_lock);
	}
}
void bch2_fs_usage_initialize(struct bch_fs *c)
{
	struct bch_fs_usage *usage;
	struct bch_dev *ca;
	unsigned i;

	percpu_down_write(&c->mark_lock);
	usage = c->usage_base;

	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->reserved += usage->persistent_reserved[i];

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
	}

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage dev = bch2_dev_usage_read(ca);

		usage->hidden += (dev.d[BCH_DATA_sb].buckets +
				  dev.d[BCH_DATA_journal].buckets) *
			ca->mi.bucket_size;
	}

	percpu_up_write(&c->mark_lock);
}
static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
						  unsigned journal_seq,
						  bool gc)
{
	return this_cpu_ptr(gc
			    ? ca->usage_gc
			    : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
}
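/*
 * Usage is kept as a base copy plus one set of percpu counters per journal
 * buffer (and a separate set for gc), so counters can be folded into
 * usage_base when the corresponding journal buffer is written. Reader sketch
 * (hypothetical helper, not part of the original file):
 */
#if 0
static u64 dev_cached_sectors_example(struct bch_dev *ca)
{
	/* bch2_dev_usage_read() (below) sums usage_base plus every percpu set: */
	struct bch_dev_usage u = bch2_dev_usage_read(ca);

	return u.d[BCH_DATA_cached].sectors;
}
#endif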
struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage ret;
	unsigned seq, i, u64s = dev_usage_u64s();

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
			acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}
static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned i, seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v;

		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
	struct bch_fs_usage_online *ret;
	unsigned seq, i, u64s;

	percpu_down_read(&c->mark_lock);

	ret = kmalloc(sizeof(struct bch_fs_usage_online) +
		      sizeof(u64) * c->replicas.nr, GFP_NOFS);
	if (unlikely(!ret)) {
		percpu_up_read(&c->mark_lock);
		return NULL;
	}

	ret->online_reserved = percpu_u64_get(c->online_reserved);

	u64s = fs_usage_u64s(c);
	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}
void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
	struct bch_dev *ca;
	unsigned i, u64s = fs_usage_u64s(c);

	BUG_ON(idx >= ARRAY_SIZE(c->usage));

	preempt_disable();
	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL) {
		u64s = dev_usage_u64s();

		acc_u64s_percpu((u64 *) ca->usage_base,
				(u64 __percpu *) ca->usage[idx], u64s);
		percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
	}
	rcu_read_unlock();

	write_seqcount_end(&c->usage_lock);
	preempt_enable();
}
void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs *c,
			   struct bch_fs_usage_online *fs_usage)
{
	unsigned i;

	pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);

	pr_buf(out, "hidden:\t\t\t\t%llu\n",
	       fs_usage->u.hidden);
	pr_buf(out, "data:\t\t\t\t%llu\n",
	       fs_usage->u.data);
	pr_buf(out, "cached:\t\t\t\t%llu\n",
	       fs_usage->u.cached);
	pr_buf(out, "reserved:\t\t\t%llu\n",
	       fs_usage->u.reserved);
	pr_buf(out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->u.nr_inodes);
	pr_buf(out, "online reserved:\t\t%llu\n",
	       fs_usage->online_reserved);

	for (i = 0;
	     i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
	     i++) {
		pr_buf(out, "%u replicas:\n", i + 1);
		pr_buf(out, "\treserved:\t\t%llu\n",
		       fs_usage->u.persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		pr_buf(out, "\t");
		bch2_replicas_entry_to_text(out, e);
		pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
	}
}
static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}
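/*
 * Worked example (assuming RESERVE_FACTOR is 6, its value in buckets.h at
 * the time of writing): reserve_factor(100) = 100 + (round_up(100, 64) >> 6)
 * = 100 + (128 >> 6) = 102, i.e. reservations are padded by roughly 1/64th.
 */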
u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
	return min(fs_usage->u.hidden +
		   fs_usage->u.btree +
		   fs_usage->u.data +
		   reserve_factor(fs_usage->u.reserved +
				  fs_usage->online_reserved),
		   c->capacity);
}
static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->hidden);

	data		= bch2_fs_usage_read_one(c, &c->usage_base->data) +
		bch2_fs_usage_read_one(c, &c->usage_base->btree);
	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

	return ret;
}
struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}
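/*
 * Usage sketch (hypothetical caller, along the lines of the statfs path):
 * the _short read is the cheap way to answer free space queries.
 */
#if 0
static u64 free_sectors_example(struct bch_fs *c)
{
	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);

	return u.free;	/* == u.capacity - u.used, in sectors */
}
#endif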
static inline int is_unavailable_bucket(struct bucket_mark m)
{
	return !is_available_bucket(m);
}

static inline int bucket_sectors_fragmented(struct bch_dev *ca,
					    struct bucket_mark m)
{
	return bucket_sectors_used(m)
		? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
		: 0;
}

static inline int is_stripe_data_bucket(struct bucket_mark m)
{
	return m.stripe && m.data_type != BCH_DATA_parity;
}

static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
	return m.cached_sectors && !m.dirty_sectors
		? BCH_DATA_cached
		: m.data_type;
}

static bool bucket_became_unavailable(struct bucket_mark old,
				      struct bucket_mark new)
{
	return is_available_bucket(old) &&
	       !is_available_bucket(new);
}
static inline void account_bucket(struct bch_fs_usage *fs_usage,
				  struct bch_dev_usage *dev_usage,
				  enum bch_data_type type,
				  int nr, s64 size)
{
	if (type == BCH_DATA_sb || type == BCH_DATA_journal)
		fs_usage->hidden	+= size;

	dev_usage->d[type].buckets	+= nr;
}
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
				  struct bucket_mark old, struct bucket_mark new,
				  u64 journal_seq, bool gc)
{
	struct bch_fs_usage *fs_usage;
	struct bch_dev_usage *u;

	percpu_rwsem_assert_held(&c->mark_lock);

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	u = dev_usage_ptr(ca, journal_seq, gc);

	if (bucket_type(old))
		account_bucket(fs_usage, u, bucket_type(old),
			       -1, -ca->mi.bucket_size);

	if (bucket_type(new))
		account_bucket(fs_usage, u, bucket_type(new),
			       1, ca->mi.bucket_size);

	u->buckets_ec += (int) new.stripe - (int) old.stripe;
	u->buckets_unavailable +=
		is_unavailable_bucket(new) - is_unavailable_bucket(old);

	u->d[old.data_type].sectors -= old.dirty_sectors;
	u->d[new.data_type].sectors += new.dirty_sectors;
	u->d[BCH_DATA_cached].sectors +=
		(int) new.cached_sectors - (int) old.cached_sectors;

	u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
	u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);

	preempt_enable();

	if (!is_available_bucket(old) && is_available_bucket(new))
		bch2_wake_allocator(ca);
}
static inline int __update_replicas(struct bch_fs *c,
				    struct bch_fs_usage *fs_usage,
				    struct bch_replicas_entry *r,
				    s64 sectors)
{
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx]		+= sectors;
	return 0;
}
static inline int update_replicas(struct bch_fs *c,
				  struct bch_replicas_entry *r, s64 sectors,
				  unsigned journal_seq, bool gc)
{
	struct bch_fs_usage __percpu *fs_usage;
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, gc);
	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
	fs_usage->replicas[idx]		+= sectors;
	preempt_enable();
	return 0;
}
static inline int update_cached_sectors(struct bch_fs *c,
					unsigned dev, s64 sectors,
					unsigned journal_seq, bool gc)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	return update_replicas(c, &r.e, sectors, journal_seq, gc);
}
static struct replicas_delta_list *
replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	unsigned new_size = d ? (d->size + more) * 2 : 128;
	unsigned alloc_size = sizeof(*d) + new_size;

	WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);

	if (!d || d->used + more > d->size) {
		d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);

		BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);

		if (!d) {
			d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
			memset(d, 0, REPLICAS_DELTA_LIST_MAX);

			if (trans->fs_usage_deltas)
				memcpy(d, trans->fs_usage_deltas,
				       trans->fs_usage_deltas->size + sizeof(*d));

			new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
			kfree(trans->fs_usage_deltas);
		}

		d->size = new_size;
		trans->fs_usage_deltas = d;
	}

	return d;
}
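/*
 * Growth policy, with made-up numbers: the first call allocates
 * sizeof(*d) + 128 bytes; when a later call needs more room the buffer grows
 * to (size + more) * 2, roughly doubling each time, until krealloc() fails
 * and the fixed-size mempool fallback above takes over at
 * REPLICAS_DELTA_LIST_MAX.
 */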
static inline void update_replicas_list(struct btree_trans *trans,
					struct bch_replicas_entry *r,
					s64 sectors)
{
	struct replicas_delta_list *d;
	struct replicas_delta *n;
	unsigned b;

	if (!sectors)
		return;

	b = replicas_entry_bytes(r) + 8;
	d = replicas_deltas_realloc(trans, b);

	n = (void *) d->d + d->used;
	n->delta = sectors;
	memcpy(&n->r, r, replicas_entry_bytes(r));
	bch2_replicas_entry_sort(&n->r);
	d->used += b;
}
static inline void update_cached_sectors_list(struct btree_trans *trans,
					      unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas_list(trans, &r.e, sectors);
}
#define do_mark_fn(fn, c, pos, flags, ...)				\
({									\
	int gc, ret = 0;						\
									\
	percpu_rwsem_assert_held(&c->mark_lock);			\
									\
	for (gc = 0; gc < 2 && !ret; gc++)				\
		if (!gc == !(flags & BTREE_TRIGGER_GC) ||		\
		    (gc && gc_visited(c, pos)))				\
			ret = fn(c, __VA_ARGS__, gc);			\
									\
	ret;								\
})
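/*
 * Expansion sketch: do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
 * ca, b, type, sectors) runs __bch2_mark_metadata_bucket() against the
 * primary bucket marks when BTREE_TRIGGER_GC isn't set, and additionally
 * against the gc copy when gc is running and has already visited pos -
 * keeping both sets of marks in sync.
 */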
void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, bool owned_by_allocator)
{
	struct bucket *g = bucket(ca, b);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		new.owned_by_allocator	= owned_by_allocator;
	}));

	BUG_ON(owned_by_allocator == old.owned_by_allocator);
}
static int bch2_mark_alloc(struct bch_fs *c,
			   struct bkey_s_c old, struct bkey_s_c new,
			   u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bkey_alloc_unpacked u;
	struct bch_dev *ca;
	struct bucket *g;
	struct bucket_mark old_m, m;

	/* We don't do anything for deletions - do we?: */
	if (new.k->type != KEY_TYPE_alloc &&
	    new.k->type != KEY_TYPE_alloc_v2)
		return 0;

	/*
	 * alloc btree is read in by bch2_alloc_read, not gc:
	 */
	if ((flags & BTREE_TRIGGER_GC) &&
	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
		return 0;

	ca = bch_dev_bkey_exists(c, new.k->p.inode);

	if (new.k->p.offset >= ca->mi.nbuckets)
		return 0;

	g = __bucket(ca, new.k->p.offset, gc);
	u = bch2_alloc_unpack(new);

	old_m = bucket_cmpxchg(g, m, ({
		m.gen			= u.gen;
		m.data_type		= u.data_type;
		m.dirty_sectors		= u.dirty_sectors;
		m.cached_sectors	= u.cached_sectors;
		m.stripe		= u.stripe != 0;

		if (journal_seq) {
			m.journal_seq_valid	= 1;
			m.journal_seq		= journal_seq;
		}
	}));

	bch2_dev_usage_update(c, ca, old_m, m, journal_seq, gc);

	g->io_time[READ]	= u.read_time;
	g->io_time[WRITE]	= u.write_time;
	g->oldest_gen		= u.oldest_gen;
	g->gen_valid		= 1;
	g->stripe		= u.stripe;
	g->stripe_redundancy	= u.stripe_redundancy;

	/*
	 * need to know if we're getting called from the invalidate path or
	 * not:
	 */

	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
	    old_m.cached_sectors) {
		if (update_cached_sectors(c, ca->dev_idx, -old_m.cached_sectors,
					  journal_seq, gc)) {
			bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
			return -1;
		}

		trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
				 old_m.cached_sectors);
	}

	return 0;
}
#define checked_add(a, b)					\
({								\
	unsigned _res = (unsigned) (a) + (b);			\
	bool overflow = _res > U16_MAX;				\
	if (overflow)						\
		_res = U16_MAX;					\
	(a) = _res;						\
	overflow;						\
})
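/*
 * Saturation example (hypothetical values): bucket sector counts are u16s,
 * so checked_add() clamps at U16_MAX and reports overflow instead of
 * wrapping.
 */
#if 0
static void checked_add_example(void)
{
	u16 sectors = U16_MAX - 10;
	bool overflow = checked_add(sectors, 20);

	/* here sectors == U16_MAX and overflow == true */
}
#endif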
static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t b, enum bch_data_type data_type,
				       unsigned sectors, bool gc)
{
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;
	bool overflow;

	BUG_ON(data_type != BCH_DATA_sb &&
	       data_type != BCH_DATA_journal);

	old = bucket_cmpxchg(g, new, ({
		new.data_type	= data_type;
		overflow = checked_add(new.dirty_sectors, sectors);
	}));

	bch2_fs_inconsistent_on(old.data_type &&
				old.data_type != data_type, c,
		"different types of data in same bucket: %s, %s",
		bch2_data_types[old.data_type],
		bch2_data_types[data_type]);

	bch2_fs_inconsistent_on(overflow, c,
		"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
		ca->dev_idx, b, new.gen,
		bch2_data_types[old.data_type ?: data_type],
		old.dirty_sectors, sectors);

	bch2_dev_usage_update(c, ca, old, new, 0, gc);
	return 0;
}
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			       size_t b, enum bch_data_type type,
			       unsigned sectors, struct gc_pos pos,
			       unsigned flags)
{
	BUG_ON(type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return;

	if (likely(c)) {
		do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
			   ca, b, type, sectors);
	} else {
		__bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
	}
}
static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return p.crc.compression_type &&
		p.crc.compression_type != BCH_COMPRESSION_TYPE_incompressible
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}
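/*
 * Worked example: for a crc with compressed_size 64 and uncompressed_size
 * 128, marking 32 uncompressed sectors charges
 * DIV_ROUND_UP(32 * 64, 128) = 16 sectors of disk usage; uncompressed and
 * incompressible data is charged 1:1.
 */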
static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
			    const struct bch_extent_ptr *ptr,
			    s64 sectors, enum bch_data_type ptr_data_type,
			    u8 bucket_gen, u8 bucket_data_type,
			    u16 dirty_sectors, u16 cached_sectors)
{
	size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
	u16 bucket_sectors = !ptr->cached
		? dirty_sectors
		: cached_sectors;
	char buf[200];

	if (gen_after(ptr->gen, bucket_gen)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != ptr->gen && !ptr->cached) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			ptr->gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != ptr->gen)
		return 1;

	if (bucket_data_type && ptr_data_type &&
	    bucket_data_type != ptr_data_type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type],
			bch2_data_types[ptr_data_type],
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
			"while marking %s",
			ptr->dev, bucket_nr, bucket_gen,
			bch2_data_types[bucket_data_type ?: ptr_data_type],
			bucket_sectors, sectors,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	return 0;
}
static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
			      unsigned ptr_idx,
			      u64 journal_seq, unsigned flags)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	unsigned nr_data = s->nr_blocks - s->nr_redundant;
	bool parity = ptr_idx >= nr_data;
	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	struct bucket *g = PTR_BUCKET(ca, ptr, gc);
	struct bucket_mark new, old;
	char buf[200];
	int ret = 0;

	if (g->stripe && g->stripe != k.k->p.offset) {
		bch2_fs_inconsistent(c,
			      "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
			      ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
			      (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EINVAL;
	}

	old = bucket_cmpxchg(g, new, ({
		ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
				       new.dirty_sectors, new.cached_sectors);
		if (ret)
			return ret;

		if (parity) {
			new.data_type		= BCH_DATA_parity;
			new.dirty_sectors	= le16_to_cpu(s->sectors);
		}

		if (journal_seq) {
			new.journal_seq_valid	= 1;
			new.journal_seq		= journal_seq;
		}
	}));

	g->stripe		= k.k->p.offset;
	g->stripe_redundancy	= s->nr_redundant;

	bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
	return 0;
}
static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			  const struct bch_extent_ptr *ptr,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 bucket_gen, u8 *bucket_data_type,
			  u16 *dirty_sectors, u16 *cached_sectors)
{
	u16 *dst_sectors = !ptr->cached
		? dirty_sectors
		: cached_sectors;
	int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
				   bucket_gen, *bucket_data_type,
				   *dirty_sectors, *cached_sectors);

	if (ret)
		return ret;

	*dst_sectors += sectors;
	*bucket_data_type = *dirty_sectors || *cached_sectors
		? ptr_data_type : 0;
	return 0;
}
static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			     struct extent_ptr_decoded p,
			     s64 sectors, enum bch_data_type data_type,
			     u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bucket_mark old, new;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
	u8 bucket_data_type;
	u64 v;
	int ret;

	v = atomic64_read(&g->_mark.v);
	do {
		new.v.counter = old.v.counter = v;
		bucket_data_type = new.data_type;

		ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
				     &bucket_data_type,
				     &new.dirty_sectors,
				     &new.cached_sectors);
		if (ret)
			return ret;

		new.data_type = bucket_data_type;

		if (journal_seq) {
			new.journal_seq_valid = 1;
			new.journal_seq = journal_seq;
		}

		if (flags & BTREE_TRIGGER_NOATOMIC) {
			g->_mark = new;
			break;
		}
	} while ((v = atomic64_cmpxchg(&g->_mark.v,
			      old.v.counter,
			      new.v.counter)) != old.v.counter);

	bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);

	BUG_ON(!gc && bucket_became_unavailable(old, new));

	return 0;
}
static int bch2_mark_stripe_ptr(struct bch_fs *c,
				struct bch_extent_stripe_ptr p,
				enum bch_data_type data_type,
				s64 sectors,
				unsigned journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bch_replicas_padded r;
	struct stripe *m;
	unsigned i, blocks_nonempty = 0;

	m = genradix_ptr(&c->stripes[gc], p.idx);

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || !m->alive) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
				    (u64) p.idx);
		bch2_inconsistent_error(c);
		return -EIO;
	}

	m->block_sectors[p.block] += sectors;

	r = m->r;

	for (i = 0; i < m->nr_blocks; i++)
		blocks_nonempty += m->block_sectors[i] != 0;

	if (m->blocks_nonempty != blocks_nonempty) {
		m->blocks_nonempty = blocks_nonempty;
		if (!gc)
			bch2_stripes_heap_update(c, m, p.idx);
	}

	spin_unlock(&c->ec_stripes_heap_lock);

	r.e.data_type = data_type;
	update_replicas(c, &r.e, sectors, journal_seq, gc);

	return 0;
}
static int bch2_mark_extent(struct bch_fs *c,
			    struct bkey_s_c old, struct bkey_s_c new,
			    unsigned journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	s64 sectors = bkey_is_btree_ptr(k.k)
		? c->opts.btree_node_size
		: k.k->size;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
	       (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
					journal_seq, flags);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				if (update_cached_sectors(c, p.ptr.dev, disk_sectors,
							  journal_seq, gc)) {
					bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
					return -1;
				}
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
					disk_sectors, journal_seq, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs) {
		if (update_replicas(c, &r.e, dirty_sectors, journal_seq, gc)) {
			char buf[200];

			bch2_bkey_val_to_text(&PBUF(buf), c, k);
			bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
			return -1;
		}
	}

	return 0;
}
static int bch2_mark_stripe(struct bch_fs *c,
			    struct bkey_s_c old, struct bkey_s_c new,
			    u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	size_t idx = new.k->p.offset;
	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(old).v : NULL;
	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
		? bkey_s_c_to_stripe(new).v : NULL;
	struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
	unsigned i;
	int ret;

	BUG_ON(gc && old_s);

	if (!m || (old_s && !m->alive)) {
		bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
				    idx);
		bch2_inconsistent_error(c);
		return -1;
	}

	if (!new_s) {
		spin_lock(&c->ec_stripes_heap_lock);
		bch2_stripes_heap_del(c, m, idx);
		spin_unlock(&c->ec_stripes_heap_lock);

		memset(m, 0, sizeof(*m));
	} else {
		m->alive	= true;
		m->sectors	= le16_to_cpu(new_s->sectors);
		m->algorithm	= new_s->algorithm;
		m->nr_blocks	= new_s->nr_blocks;
		m->nr_redundant	= new_s->nr_redundant;
		m->blocks_nonempty = 0;

		for (i = 0; i < new_s->nr_blocks; i++) {
			m->block_sectors[i] =
				stripe_blockcount_get(new_s, i);
			m->blocks_nonempty += !!m->block_sectors[i];

			m->ptrs[i] = new_s->ptrs[i];
		}

		bch2_bkey_to_replicas(&m->r.e, new);

		if (!gc) {
			spin_lock(&c->ec_stripes_heap_lock);
			bch2_stripes_heap_update(c, m, idx);
			spin_unlock(&c->ec_stripes_heap_lock);
		}
	}

	if (gc) {
		/*
		 * gc recalculates this field from stripe ptr
		 * references:
		 */
		memset(m->block_sectors, 0, sizeof(m->block_sectors));
		m->blocks_nonempty = 0;

		for (i = 0; i < new_s->nr_blocks; i++) {
			ret = mark_stripe_bucket(c, new, i, journal_seq, flags);
			if (ret)
				return ret;
		}

		if (update_replicas(c, &m->r.e,
				((s64) m->sectors * m->nr_redundant),
				journal_seq, gc)) {
			char buf[200];

			bch2_bkey_val_to_text(&PBUF(buf), c, new);
			bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
			return -1;
		}
	}

	return 0;
}
static int bch2_mark_inode(struct bch_fs *c,
			   struct bkey_s_c old, struct bkey_s_c new,
			   u64 journal_seq, unsigned flags)
{
	struct bch_fs_usage __percpu *fs_usage;

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
	fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
	fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
	preempt_enable();

	return 0;
}
static int bch2_mark_reservation(struct bch_fs *c,
				 struct bkey_s_c old, struct bkey_s_c new,
				 u64 journal_seq, unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
	struct bch_fs_usage __percpu *fs_usage;
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;

	if (flags & BTREE_TRIGGER_OVERWRITE)
		sectors = -sectors;
	sectors *= replicas;

	preempt_disable();
	fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(fs_usage->persistent_reserved));

	fs_usage->reserved				+= sectors;
	fs_usage->persistent_reserved[replicas - 1]	+= sectors;
	preempt_enable();

	return 0;
}
static s64 __bch2_mark_reflink_p(struct bch_fs *c, struct bkey_s_c_reflink_p p,
				 u64 idx, unsigned flags, size_t *r_idx)
{
	struct reflink_gc *r;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;

	while (1) {
		if (*r_idx >= c->reflink_gc_nr)
			goto not_found;
		r = genradix_ptr(&c->reflink_gc_table, *r_idx);
		BUG_ON(!r);

		if (idx < r->offset)
			break;
		(*r_idx)++;
	}

	BUG_ON((s64) r->refcount + add < 0);

	r->refcount += add;
	return r->offset - idx;
not_found:
	bch2_fs_inconsistent(c,
		"%llu:%llu len %u points to nonexistent indirect extent %llu",
		p.k->p.inode, p.k->p.offset, p.k->size, idx);
	bch2_inconsistent_error(c);
	return -EIO;
}
static int bch2_mark_reflink_p(struct bch_fs *c,
			       struct bkey_s_c old, struct bkey_s_c new,
			       u64 journal_seq, unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	struct reflink_gc *ref;
	size_t l, r, m;
	u64 idx = le64_to_cpu(p.v->idx);
	unsigned sectors = p.k->size;
	s64 ret = 0;

	BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
	       (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));

	l = 0;
	r = c->reflink_gc_nr;
	while (l < r) {
		m = l + (r - l) / 2;

		ref = genradix_ptr(&c->reflink_gc_table, m);
		if (ref->offset <= idx)
			l = m + 1;
		else
			r = m;
	}

	while (sectors) {
		ret = __bch2_mark_reflink_p(c, p, idx, flags, &l);
		if (ret < 0)
			return ret;

		ret = min_t(s64, ret, sectors);
		idx	+= ret;
		sectors	-= ret;
	}

	return 0;
}
static int bch2_mark_key_locked(struct bch_fs *c,
		   struct bkey_s_c old,
		   struct bkey_s_c new,
		   u64 journal_seq, unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;

	BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));

	switch (k.k->type) {
	case KEY_TYPE_alloc:
	case KEY_TYPE_alloc_v2:
		return bch2_mark_alloc(c, old, new, journal_seq, flags);
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return bch2_mark_extent(c, old, new, journal_seq, flags);
	case KEY_TYPE_stripe:
		return bch2_mark_stripe(c, old, new, journal_seq, flags);
	case KEY_TYPE_inode:
		return bch2_mark_inode(c, old, new, journal_seq, flags);
	case KEY_TYPE_reservation:
		return bch2_mark_reservation(c, old, new, journal_seq, flags);
	case KEY_TYPE_reflink_p:
		return bch2_mark_reflink_p(c, old, new, journal_seq, flags);
	case KEY_TYPE_snapshot:
		return bch2_mark_snapshot(c, old, new, journal_seq, flags);
	default:
		return 0;
	}
}
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new, unsigned flags)
{
	struct bkey deleted = KEY(0, 0, 0);
	struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
	int ret;

	percpu_down_read(&c->mark_lock);
	ret = bch2_mark_key_locked(c, old, new, 0, flags);
	percpu_up_read(&c->mark_lock);

	return ret;
}
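/*
 * Usage sketch (hypothetical caller): bch2_mark_key() is the
 * non-transactional entry point - e.g. gc marking a key it walked, with no
 * journal sequence number and a synthesized deleted old key:
 */
#if 0
static int mark_one_key_example(struct bch_fs *c, struct bkey_s_c k)
{
	return bch2_mark_key(c, k, BTREE_TRIGGER_INSERT|BTREE_TRIGGER_GC);
}
#endif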
int bch2_mark_update(struct btree_trans *trans, struct btree_path *path,
		     struct bkey_i *new, unsigned flags)
{
	struct bch_fs		*c = trans->c;
	struct bkey		_deleted = KEY(0, 0, 0);
	struct bkey_s_c		deleted = (struct bkey_s_c) { &_deleted, NULL };
	struct bkey_s_c		old;
	struct bkey		unpacked;
	int ret;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(path->btree_id))
		return 0;

	old = bch2_btree_path_peek_slot(path, &unpacked);

	if (old.k->type == new->k.type &&
	    ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
		ret   = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
				trans->journal_res.seq,
				BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
	} else {
		ret   = bch2_mark_key_locked(c, deleted, bkey_i_to_s_c(new),
				trans->journal_res.seq,
				BTREE_TRIGGER_INSERT|flags) ?:
			bch2_mark_key_locked(c, old, deleted,
				trans->journal_res.seq,
				BTREE_TRIGGER_OVERWRITE|flags);
	}

	return ret;
}
static noinline __cold
void fs_usage_apply_warn(struct btree_trans *trans,
			 unsigned disk_res_sectors,
			 s64 should_not_have_added)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	char buf[200];

	bch_err(c, "disk usage increased %lli more than %u sectors reserved",
		should_not_have_added, disk_res_sectors);

	trans_for_each_update(trans, i) {
		pr_err("while inserting");
		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
		pr_err("%s", buf);
		pr_err("overlapping with");

		if (!i->cached) {
			struct bkey u;
			struct bkey_s_c k = bch2_btree_path_peek_slot(i->path, &u);

			bch2_bkey_val_to_text(&PBUF(buf), c, k);
			pr_err("%s", buf);
		} else {
			struct bkey_cached *ck = (void *) i->path->l[0].b;

			if (ck->valid) {
				bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
				pr_err("%s", buf);
			}
		}
	}
}
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
			       struct replicas_delta_list *deltas)
{
	struct bch_fs *c = trans->c;
	static int warned_disk_usage = 0;
	bool warn = false;
	unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	struct replicas_delta *d = deltas->d;
	struct replicas_delta *top = (void *) deltas->d + deltas->used;
	struct bch_fs_usage *dst;
	s64 added = 0, should_not_have_added;
	unsigned i;

	percpu_rwsem_assert_held(&c->mark_lock);

	preempt_disable();
	dst = fs_usage_ptr(c, trans->journal_res.seq, false);

	for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
		switch (d->r.data_type) {
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			added += d->delta;
		}

		BUG_ON(__update_replicas(c, dst, &d->r, d->delta));
	}

	dst->nr_inodes += deltas->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		added				+= deltas->persistent_reserved[i];
		dst->reserved			+= deltas->persistent_reserved[i];
		dst->persistent_reserved[i]	+= deltas->persistent_reserved[i];
	}

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new, v = atomic64_read(&c->sectors_available);

		do {
			old = v;
			new = max_t(s64, 0, old - should_not_have_added);
		} while ((v = atomic64_cmpxchg(&c->sectors_available,
					       old, new)) != old);

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_enable();

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
}
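/*
 * Arithmetic example for the check above (made-up numbers): if a transaction
 * reserved 64 sectors but its deltas add up to 80, should_not_have_added is
 * 16; those 16 sectors are clawed back out of c->sectors_available and the
 * one-time warning fires.
 */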
static struct bkey_alloc_buf *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
			      const struct bch_extent_ptr *ptr,
			      struct bkey_alloc_unpacked *u)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
	struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
	struct bucket *g;
	struct bkey_alloc_buf *a;
	struct bkey_i *update = btree_trans_peek_updates(trans, BTREE_ID_alloc, pos);
	int ret;

	a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
	if (IS_ERR(a))
		return a;

	bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_CACHED_NOFILL|
			     BTREE_ITER_INTENT);
	ret = bch2_btree_iter_traverse(iter);
	if (ret) {
		bch2_trans_iter_exit(trans, iter);
		return ERR_PTR(ret);
	}

	if (update && !bpos_cmp(update->k.p, pos)) {
		*u = bch2_alloc_unpack(bkey_i_to_s_c(update));
	} else {
		percpu_down_read(&c->mark_lock);
		g = bucket(ca, pos.offset);
		*u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
		percpu_up_read(&c->mark_lock);
	}

	return a;
}
static int bch2_trans_mark_pointer(struct btree_trans *trans,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_alloc_unpacked u;
	struct bkey_alloc_buf *a;
	int ret;

	a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
	if (IS_ERR(a))
		return PTR_ERR(a);

	ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
			     &u.dirty_sectors, &u.cached_sectors);
	if (ret)
		goto out;

	bch2_alloc_pack(c, a, u);
	bch2_trans_update(trans, &iter, &a->k, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
			struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_stripe *s;
	struct bch_replicas_padded r;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c,
			"pointer to nonexistent stripe %llu",
			(u64) p.ec.idx);
		bch2_inconsistent_error(c);
		ret = -EIO;
		goto err;
	}

	if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
		bch2_fs_inconsistent(c,
			"stripe pointer doesn't match stripe %llu",
			(u64) p.ec.idx);
		ret = -EIO;
		goto err;
	}

	s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		goto err;

	bkey_reassemble(&s->k_i, k);
	stripe_blockcount_set(&s->v, p.ec.block,
		stripe_blockcount_get(&s->v, p.ec.block) +
		sectors);
	bch2_trans_update(trans, &iter, &s->k_i, 0);

	bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
	r.e.data_type = data_type;
	update_replicas_list(trans, &r.e, sectors);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int bch2_trans_mark_extent(struct btree_trans *trans,
			struct bkey_s_c k, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	s64 sectors = bkey_is_btree_ptr(k.k)
		? c->opts.btree_node_size
		: k.k->size;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
	       (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = ptr_disk_sectors(sectors, p);

		if (flags & BTREE_TRIGGER_OVERWRITE)
			disk_sectors = -disk_sectors;

		ret = bch2_trans_mark_pointer(trans, k, p,
					disk_sectors, data_type);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors_list(trans, p.ptr.dev,
							   disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			ret = bch2_trans_mark_stripe_ptr(trans, p,
					disk_sectors, data_type);
			if (ret)
				return ret;

			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas_list(trans, &r.e, dirty_sectors);

	return 0;
}
static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
					    struct bkey_s_c_stripe s,
					    unsigned idx, bool deleting)
{
	struct bch_fs *c = trans->c;
	const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
	struct bkey_alloc_buf *a;
	struct btree_iter iter;
	struct bkey_alloc_unpacked u;
	bool parity = idx >= s.v->nr_blocks - s.v->nr_redundant;
	int ret = 0;

	a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (parity) {
		s64 sectors = le16_to_cpu(s.v->sectors);

		if (deleting)
			sectors = -sectors;

		u.dirty_sectors += sectors;
		u.data_type = u.dirty_sectors
			? BCH_DATA_parity
			: 0;
	}

	if (!deleting) {
		if (bch2_fs_inconsistent_on(u.stripe && u.stripe != s.k->p.offset, c,
				"bucket %llu:%llu gen %u: multiple stripes using same bucket (%u, %llu)",
				iter.pos.inode, iter.pos.offset, u.gen,
				u.stripe, s.k->p.offset)) {
			ret = -EIO;
			goto err;
		}

		u.stripe		= s.k->p.offset;
		u.stripe_redundancy	= s.v->nr_redundant;
	} else {
		u.stripe		= 0;
		u.stripe_redundancy	= 0;
	}

	bch2_alloc_pack(c, a, u);
	bch2_trans_update(trans, &iter, &a->k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int bch2_trans_mark_stripe(struct btree_trans *trans,
				  struct bkey_s_c old, struct bkey_s_c new,
				  unsigned flags)
{
	struct bkey_s_c_stripe old_s = { .k = NULL };
	struct bkey_s_c_stripe new_s = { .k = NULL };
	struct bch_replicas_padded r;
	unsigned i;
	int ret = 0;

	if (old.k->type == KEY_TYPE_stripe)
		old_s = bkey_s_c_to_stripe(old);
	if (new.k->type == KEY_TYPE_stripe)
		new_s = bkey_s_c_to_stripe(new);

	/*
	 * If the pointers aren't changing, we don't need to do anything:
	 */
	if (new_s.k && old_s.k &&
	    new_s.v->nr_blocks		== old_s.v->nr_blocks &&
	    new_s.v->nr_redundant	== old_s.v->nr_redundant &&
	    !memcmp(old_s.v->ptrs, new_s.v->ptrs,
		    new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
		return 0;

	if (new_s.k) {
		s64 sectors = le16_to_cpu(new_s.v->sectors);

		bch2_bkey_to_replicas(&r.e, new);
		update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);

		for (i = 0; i < new_s.v->nr_blocks; i++) {
			ret = bch2_trans_mark_stripe_alloc_ref(trans, new_s,
							       i, false);
			if (ret)
				return ret;
		}
	}

	if (old_s.k) {
		s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));

		bch2_bkey_to_replicas(&r.e, old);
		update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);

		for (i = 0; i < old_s.v->nr_blocks; i++) {
			ret = bch2_trans_mark_stripe_alloc_ref(trans, old_s,
							       i, true);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static int bch2_trans_mark_inode(struct btree_trans *trans,
				 struct bkey_s_c old,
				 struct bkey_s_c new,
				 unsigned flags)
{
	int nr = (new.k->type == KEY_TYPE_inode) -
		(old.k->type == KEY_TYPE_inode);

	if (nr) {
		struct replicas_delta_list *d =
			replicas_deltas_realloc(trans, 0);
		d->nr_inodes += nr;
	}

	return 0;
}
static int bch2_trans_mark_reservation(struct btree_trans *trans,
				       struct bkey_s_c k, unsigned flags)
{
	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
	s64 sectors = (s64) k.k->size;
	struct replicas_delta_list *d;

	BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
	       (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));

	if (flags & BTREE_TRIGGER_OVERWRITE)
		sectors = -sectors;
	sectors *= replicas;

	d = replicas_deltas_realloc(trans, 0);

	replicas = clamp_t(unsigned, replicas, 1,
			   ARRAY_SIZE(d->persistent_reserved));

	d->persistent_reserved[replicas - 1] += sectors;
	return 0;
}
static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 idx, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i *n;
	__le64 *refcount;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	s64 ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, idx),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto err;

	bkey_reassemble(n, k);

	refcount = bkey_refcount(n);
	if (!refcount) {
		bch2_fs_inconsistent(c,
			"%llu:%llu len %u points to nonexistent indirect extent %llu",
			p.k->p.inode, p.k->p.offset, p.k->size, idx);
		bch2_inconsistent_error(c);
		ret = -EIO;
		goto err;
	}

	BUG_ON(!*refcount && (flags & BTREE_TRIGGER_OVERWRITE));
	le64_add_cpu(refcount, add);

	if (!*refcount) {
		n->k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&n->k, 0);
	}

	bch2_btree_iter_set_pos_to_extent_start(&iter);
	ret = bch2_trans_update(trans, &iter, n, 0);
	if (ret)
		goto err;

	ret = k.k->p.offset - idx;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c k, unsigned flags)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	u64 idx = le64_to_cpu(p.v->idx);
	unsigned sectors = p.k->size;
	s64 ret = 0;

	while (sectors) {
		ret = __bch2_trans_mark_reflink_p(trans, p, idx, flags);
		if (ret < 0)
			return ret;

		ret = min_t(s64, ret, sectors);
		idx	+= ret;
		sectors	-= ret;
	}

	return 0;
}
int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
			struct bkey_s_c new, unsigned flags)
{
	struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;

	BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return bch2_trans_mark_extent(trans, k, flags);
	case KEY_TYPE_stripe:
		return bch2_trans_mark_stripe(trans, old, new, flags);
	case KEY_TYPE_inode:
		return bch2_trans_mark_inode(trans, old, new, flags);
	case KEY_TYPE_reservation:
		return bch2_trans_mark_reservation(trans, k, flags);
	case KEY_TYPE_reflink_p:
		return bch2_trans_mark_reflink_p(trans, k, flags);
	default:
		return 0;
	}
}
int bch2_trans_mark_update(struct btree_trans *trans,
			   struct btree_path *path,
			   struct bkey_i *new,
			   unsigned flags)
{
	struct bkey		_deleted = KEY(0, 0, 0);
	struct bkey_s_c		deleted = (struct bkey_s_c) { &_deleted, NULL };
	struct bkey_s_c		old;
	struct bkey		unpacked;
	int ret;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(path->btree_id))
		return 0;

	old = bch2_btree_path_peek_slot(path, &unpacked);

	if (old.k->type == new->k.type &&
	    ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
		ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
				BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
	} else {
		ret   = bch2_trans_mark_key(trans, deleted, bkey_i_to_s_c(new),
				BTREE_TRIGGER_INSERT|flags) ?:
			bch2_trans_mark_key(trans, old, deleted,
				BTREE_TRIGGER_OVERWRITE|flags);
	}

	return ret;
}
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, size_t b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_alloc_unpacked u;
	struct bkey_alloc_buf *a;
	struct bch_extent_ptr ptr = {
		.dev = ca->dev_idx,
		.offset = bucket_to_sector(ca, b),
	};
	int ret = 0;

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
	if (IS_ERR(a))
		return PTR_ERR(a);

	if (u.data_type && u.data_type != type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			iter.pos.inode, iter.pos.offset, u.gen,
			bch2_data_types[u.data_type],
			bch2_data_types[type],
			bch2_data_types[type]);
		ret = -EIO;
		goto out;
	}

	u.data_type	= type;
	u.dirty_sectors	= sectors;

	bch2_alloc_pack(c, a, u);
	bch2_trans_update(trans, &iter, &a->k, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, size_t b,
				    enum bch_data_type type,
				    unsigned sectors)
{
	return __bch2_trans_do(trans, NULL, NULL, 0,
			__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
}
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    struct bch_dev *ca,
					    u64 start, u64 end,
					    enum bch_data_type type,
					    u64 *bucket, unsigned *bucket_sectors)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket		= b;
		*bucket_sectors	+= sectors;
		start += sectors;
	} while (start < end);

	return 0;
}
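/*
 * Splitting example (illustrative numbers): with 128 sector buckets, a
 * superblock range covering sectors 96..160 is accounted as 32 sectors in
 * bucket 0 and 32 sectors in bucket 1; *bucket/*bucket_sectors carry the
 * partial last bucket between calls, so adjacent ranges that share a bucket
 * are merged into a single bch2_trans_mark_metadata_bucket() call.
 */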
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
				    struct bch_dev *ca)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				      offset + (1 << layout->sb_max_size_bits),
				      BCH_DATA_sb, &bucket, &bucket_sectors);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size);
		if (ret)
			return ret;
	}

	return 0;
}
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
	return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
			__bch2_trans_mark_dev_sb(&trans, ca));
}
/* Disk reservations: */

#define SECTORS_CACHE	1024
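/*
 * The fast path below grabs sectors + SECTORS_CACHE from the global atomic
 * and caches the excess in this_cpu_ptr(c->pcpu)->sectors_available, so most
 * reservations never touch the shared counter: e.g. reserving 8 sectors
 * pulls 8 + 1024 from c->sectors_available and leaves 1024 cached for this
 * cpu.
 */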
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
			      u64 sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, v, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	v = atomic64_read(&c->sectors_available);
	do {
		old = v;
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors			+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -ENOSPC;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}
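/*
 * Usage sketch (hypothetical caller): a writer takes a reservation before
 * creating extents; bch2_trans_fs_usage_apply() above later subtracts what
 * the transaction actually consumed.
 */
#if 0
static int reserve_for_write_example(struct bch_fs *c, u64 sectors)
{
	struct disk_reservation res = { 0 };
	int ret = bch2_disk_reservation_add(c, &res, sectors, 0);

	if (ret)	/* -ENOSPC */
		return ret;

	/* ... do the write, attaching res to the btree update ... */
	return 0;
}
#endif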
/* Startup/shutdown: */

static void buckets_free_rcu(struct rcu_head *rcu)
{
	struct bucket_array *buckets =
		container_of(rcu, struct bucket_array, rcu);

	kvpfree(buckets,
		sizeof(struct bucket_array) +
		buckets->nbuckets * sizeof(struct bucket));
}
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_array *buckets = NULL, *old_buckets = NULL;
	unsigned long *buckets_nouse = NULL;
	alloc_fifo	free[RESERVE_NR];
	alloc_fifo	free_inc;
	alloc_heap	alloc_heap;

	size_t btree_reserve	= DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / c->opts.btree_node_size);
	/* XXX: these should be tunable */
	size_t reserve_none	= max_t(size_t, 1, nbuckets >> 9);
	size_t copygc_reserve	= max_t(size_t, 2, nbuckets >> 6);
	size_t free_inc_nr	= max(max_t(size_t, 1, nbuckets >> 12),
				      btree_reserve * 2);
	bool resize = ca->buckets[0] != NULL;
	int ret = -ENOMEM;
	unsigned i;

	memset(&free,		0, sizeof(free));
	memset(&free_inc,	0, sizeof(free_inc));
	memset(&alloc_heap,	0, sizeof(alloc_heap));

	if (!(buckets		= kvpmalloc(sizeof(struct bucket_array) +
					    nbuckets * sizeof(struct bucket),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !init_fifo(&free[RESERVE_MOVINGGC],
		       copygc_reserve, GFP_KERNEL) ||
	    !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
	    !init_fifo(&free_inc,	free_inc_nr, GFP_KERNEL) ||
	    !init_heap(&alloc_heap,	ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
		goto err;

	buckets->first_bucket	= ca->mi.first_bucket;
	buckets->nbuckets	= nbuckets;

	bch2_copygc_stop(c);

	if (resize) {
		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_buckets = bucket_array(ca);

	if (resize) {
		size_t n = min(buckets->nbuckets, old_buckets->nbuckets);

		memcpy(buckets->b,
		       old_buckets->b,
		       n * sizeof(struct bucket));
		memcpy(buckets_nouse,
		       ca->buckets_nouse,
		       BITS_TO_LONGS(n) * sizeof(unsigned long));
	}

	rcu_assign_pointer(ca->buckets[0], buckets);
	buckets = old_buckets;

	swap(ca->buckets_nouse, buckets_nouse);

	if (resize) {
		percpu_up_write(&c->mark_lock);
		up_write(&c->gc_lock);
	}

	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++) {
		fifo_move(&free[i], &ca->free[i]);
		swap(ca->free[i], free[i]);
	}
	fifo_move(&free_inc, &ca->free_inc);
	swap(ca->free_inc, free_inc);
	spin_unlock(&c->freelist_lock);

	/* with gc lock held, alloc_heap can't be in use: */
	swap(ca->alloc_heap, alloc_heap);

	nbuckets = ca->mi.nbuckets;

	if (resize)
		up_write(&ca->bucket_lock);

	ret = 0;
err:
	free_heap(&alloc_heap);
	free_fifo(&free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&free[i]);
	kvpfree(buckets_nouse,
		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
	if (buckets)
		call_rcu(&buckets->rcu, buckets_free_rcu);

	return ret;
}
void bch2_dev_buckets_free(struct bch_dev *ca)
{
	unsigned i;

	free_heap(&ca->alloc_heap);
	free_fifo(&ca->free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);
	kvpfree(ca->buckets_nouse,
		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
	kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
		sizeof(struct bucket_array) +
		ca->mi.nbuckets * sizeof(struct bucket));

	for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
		free_percpu(ca->usage[i]);
	kfree(ca->usage_base);
}
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
	if (!ca->usage_base)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
		ca->usage[i] = alloc_percpu(struct bch_dev_usage);
		if (!ca->usage[i])
			return -ENOMEM;
	}

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}