/*
 * Advance to the next entry in a packed replicas_delta_list.
 *
 * Entries are variable length: a bch_replicas_entry plus a delta field.
 * NOTE(review): the hard-coded 8 presumably accounts for the delta
 * member (s64) preceding/following d->r in struct replicas_delta --
 * confirm against the struct definition and consider sizeof(d->delta).
 */
static inline struct replicas_delta *
replicas_delta_next(struct replicas_delta *d)
{
	return (void *) d + replicas_entry_bytes(&d->r) + 8;
}
-
/*
 * Apply a list of accumulated replicas deltas to the filesystem usage
 * totals, transactionally: if any entry fails to apply, every entry
 * already applied is backed out and -1 is returned.
 *
 * @c:		filesystem instance
 * @fs_usage:	usage totals to update; may be NULL, in which case the
 *		nr_inodes/persistent_reserved updates are skipped
 * @r:		packed, variable-length delta list (r->used bytes of
 *		entries starting at r->d)
 *
 * Returns 0 on success, -1 if update_replicas() rejected an entry.
 */
int bch2_replicas_delta_list_apply(struct bch_fs *c,
				   struct bch_fs_usage *fs_usage,
				   struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	unsigned i;

	for (d = r->d; d != top; d = replicas_delta_next(d))
		if (update_replicas(c, fs_usage, &d->r, d->delta)) {
			/* Failing entry becomes the unwind boundary. */
			top = d;
			goto unwind;
		}

	if (!fs_usage)
		return 0;

	fs_usage->nr_inodes += r->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		fs_usage->reserved += r->persistent_reserved[i];
		fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
	}

	return 0;
unwind:
	/* Back out every entry applied before the failure, in order. */
	for (d = r->d; d != top; d = replicas_delta_next(d))
		update_replicas(c, fs_usage, &d->r, -d->delta);
	return -1;
}
-
/*
 * do_mark_fn() - run a mark helper against the normal and/or GC copies
 * of bucket state.
 *
 * Invokes @fn up to twice, appending a bool gc argument: the gc=0 pass
 * runs when the caller is not a GC trigger (!(flags & BTREE_TRIGGER_GC)),
 * and the gc=1 pass runs either when the caller IS a GC trigger, or when
 * GC has already visited @pos (so the GC copy must be kept in sync).
 * Stops early if @fn returns nonzero; the statement expression evaluates
 * to @fn's last return value (0 if @fn was never called).
 *
 * Caller must hold c->mark_lock (asserted below).
 */
#define do_mark_fn(fn, c, pos, flags, ...)				\
({									\
	int gc, ret = 0;						\
									\
	percpu_rwsem_assert_held(&c->mark_lock);			\
									\
	for (gc = 0; gc < 2 && !ret; gc++)				\
		if (!gc == !(flags & BTREE_TRIGGER_GC) ||		\
		    (gc && gc_visited(c, pos)))				\
			ret = fn(c, __VA_ARGS__, gc);			\
	ret;								\
})
-
/*
 * Mark bucket @b on @ca as invalidated: claimed by the allocator,
 * emptied, and with its generation bumped so existing pointers into it
 * go stale.
 *
 * @ret receives the bucket's previous mark -- non-GC pass only; the GC
 * pass leaves it untouched.  Always returns 0 so do_mark_fn() runs
 * both passes.
 */
static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, struct bucket_mark *ret,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	/*
	 * Atomically transition the bucket: it must currently be
	 * available (BUG otherwise), and comes out owned by the
	 * allocator with no data and an incremented generation.
	 */
	old = bucket_cmpxchg(g, new, ({
		BUG_ON(!is_available_bucket(new));

		new.owned_by_allocator = true;
		new.data_type = 0;
		new.cached_sectors = 0;
		new.dirty_sectors = 0;
		new.gen++;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	/* Invalidation discards whatever cached data the bucket held. */
	if (old.cached_sectors)
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -((s64) old.cached_sectors));

	/* Report the pre-invalidate mark from the non-GC pass only. */
	if (!gc)
		*ret = old;
	return 0;
}
-
-void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, struct bucket_mark *old)
-{
- do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
- ca, b, old);
-
- if (!old->owned_by_allocator && old->cached_sectors)
- trace_invalidate(ca, bucket_to_sector(ca, b),
- old->cached_sectors);
-}
-
/*
 * Set or clear allocator ownership of bucket @b on @ca, updating the
 * device usage counters to match.  Always returns 0 so do_mark_fn()
 * runs both passes.
 */
static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, bool owned_by_allocator,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		new.owned_by_allocator = owned_by_allocator;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	/*
	 * Non-GC pass: releasing ownership of a bucket the allocator
	 * never owned indicates broken accounting.
	 */
	BUG_ON(!gc &&
	       !owned_by_allocator && !old.owned_by_allocator);

	return 0;
}
-
/*
 * Public wrapper: flip allocator ownership of bucket @b via
 * do_mark_fn(), with preemption disabled around the update.
 * NOTE(review): presumably the preempt guard exists because
 * fs_usage_ptr() hands back a percpu usage pointer that must not
 * migrate CPUs mid-update -- confirm against fs_usage_ptr().
 */
void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, bool owned_by_allocator,
			    struct gc_pos pos, unsigned flags)
{
	preempt_disable();

	do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
		   ca, b, owned_by_allocator);

	preempt_enable();
}
-
-static int bch2_mark_alloc(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- struct bch_fs_usage *fs_usage,
- u64 journal_seq, unsigned flags)