+/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H
#include "buckets_types.h"
+#include "extents.h"
#include "super.h"
-#define for_each_bucket(b, ca) \
- for (b = (ca)->buckets + (ca)->mi.first_bucket; \
- b < (ca)->buckets + (ca)->mi.nbuckets; b++)
-
-#define bucket_cmpxchg(g, new, expr) \
-({ \
- u64 _v = READ_ONCE((g)->_mark.counter); \
- struct bucket_mark _old; \
- \
- do { \
- (new).counter = _old.counter = _v; \
- expr; \
- } while ((_v = cmpxchg(&(g)->_mark.counter, \
- _old.counter, \
- (new).counter)) != _old.counter);\
- _old; \
-})
+#define for_each_bucket(_b, _buckets) \
+ for (_b = (_buckets)->b + (_buckets)->first_bucket; \
+ _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
-/*
- * bucket_gc_gen() returns the difference between the bucket's current gen and
- * the oldest gen of any pointer into that bucket in the btree.
- */
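+/*
+ * Per-bucket lock, open coded as a test-and-set spinlock: a nonzero return
+ * from xchg() means another thread holds the lock, and the
+ * smp_store_release() in bucket_unlock() orders the critical section's
+ * stores before the lock word is cleared.
+ */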
+static inline void bucket_unlock(struct bucket *b)
+{
+ smp_store_release(&b->lock, 0);
+}
+
+static inline void bucket_lock(struct bucket *b)
+{
+ while (xchg(&b->lock, 1))
+ cpu_relax();
+}
+
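+/*
+ * The gc bucket array is RCU protected, since it can be swapped out from
+ * under readers (see bch2_dev_buckets_resize()); accesses must be under
+ * rcu_read_lock() or one of the locks checked for below:
+ */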
+static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
+{
+ return rcu_dereference_check(ca->buckets_gc,
+ !ca->fs ||
+ percpu_rwsem_is_held(&ca->fs->mark_lock) ||
+ lockdep_is_held(&ca->fs->gc_lock) ||
+ lockdep_is_held(&ca->bucket_lock));
+}
-static inline u8 bucket_gc_gen(struct bch_dev *ca, struct bucket *g)
+static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
- unsigned long r = g - ca->buckets;
- return g->mark.gen - ca->oldest_gens[r];
+ struct bucket_array *buckets = gc_bucket_array(ca);
+
+ BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
+ return buckets->b + b;
+}
+
+static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
+{
+ return rcu_dereference_check(ca->bucket_gens,
+ !ca->fs ||
+ percpu_rwsem_is_held(&ca->fs->mark_lock) ||
+ lockdep_is_held(&ca->fs->gc_lock) ||
+ lockdep_is_held(&ca->bucket_lock));
+}
+
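+/*
+ * As with the gc buckets, the bucket gens array is only stable under RCU or
+ * one of the locks above: bucket_gen() returns a pointer into it, which e.g.
+ * ptr_stale() below only dereferences inside rcu_read_lock().
+ */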
+static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
+{
+ struct bucket_gens *gens = bucket_gens(ca);
+
+ BUG_ON(b < gens->first_bucket || b >= gens->nbuckets);
+ return gens->b + b;
}
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
return sector_to_bucket(ca, ptr->offset);
}
-/*
- * Returns 0 if no pointers or device offline - only for tracepoints!
- */
-static inline size_t PTR_BUCKET_NR_TRACE(const struct bch_fs *c,
- const struct bkey_i *k,
- unsigned ptr)
+static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
+ const struct bch_extent_ptr *ptr)
{
- size_t bucket = 0;
-#if 0
- if (bkey_extent_is_data(&k->k)) {
- const struct bch_extent_ptr *ptr;
-
- extent_for_each_ptr(bkey_i_to_s_c_extent(k), ptr) {
- const struct bch_dev *ca = c->devs[ptr->dev];
- bucket = PTR_BUCKET_NR(ca, ptr);
- break;
- }
- }
-#endif
- return bucket;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}
-static inline struct bucket *PTR_BUCKET(const struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
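+/*
+ * Like PTR_BUCKET_POS(), but also returns, via @bucket_offset, the sector
+ * offset of @ptr within its bucket:
+ */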
+static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
+ const struct bch_extent_ptr *ptr,
+ u32 *bucket_offset)
{
- return ca->buckets + PTR_BUCKET_NR(ca, ptr);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
+}
+
+static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
+ const struct bch_extent_ptr *ptr)
+{
+ return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
+}
+
+static inline enum bch_data_type ptr_data_type(const struct bkey *k,
+ const struct bch_extent_ptr *ptr)
+{
+ if (bkey_is_btree_ptr(k))
+ return BCH_DATA_btree;
+
+ return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
+}
+
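+/*
+ * On-disk sectors taken up by one pointer to a (possibly compressed) extent:
+ * the live sector count scaled by the extent's compression ratio, rounded up:
+ */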
+static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
+{
+ EBUG_ON(sectors < 0);
+
+ return crc_is_compressed(p.crc)
+ ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
+ p.crc.uncompressed_size)
+ : sectors;
}
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
-static inline u8 ptr_stale(const struct bch_dev *ca,
+static inline u8 ptr_stale(struct bch_dev *ca,
const struct bch_extent_ptr *ptr)
{
- return gen_after(PTR_BUCKET(ca, ptr)->mark.gen, ptr->gen);
-}
+ u8 ret;
-/* bucket gc marks */
+ rcu_read_lock();
+ ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+ rcu_read_unlock();
-/* The dirty and cached sector counts saturate. If this occurs,
- * reference counting alone will not free the bucket, and a btree
- * GC must be performed. */
-#define GC_MAX_SECTORS_USED ((1U << 15) - 1)
+ return ret;
+}
-static inline unsigned bucket_sectors_used(struct bucket_mark mark)
+/* Device usage: */
+
+void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
+static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
- return mark.dirty_sectors + mark.cached_sectors;
+ struct bch_dev_usage ret;
+
+ bch2_dev_usage_read_fast(ca, &ret);
+ return ret;
}
-static inline bool bucket_unused(struct bucket_mark mark)
+void bch2_dev_usage_init(struct bch_dev *);
+
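+/*
+ * Buckets held back from allocation, by watermark: the cases cascade via
+ * fallthrough, so each less strict reserve also includes everything held
+ * back by the stricter reserves below it - e.g. RESERVE_none holds back
+ * 1/64th of the device on top of the copygc and btree reserves:
+ */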
+static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reserve reserve)
{
- return !mark.owned_by_allocator &&
- !mark.data_type &&
- !bucket_sectors_used(mark);
-}
+ s64 reserved = 0;
+
+ switch (reserve) {
+ case RESERVE_none:
+ reserved += ca->mi.nbuckets >> 6;
+ fallthrough;
+ case RESERVE_movinggc:
+ reserved += ca->nr_btree_reserve;
+ fallthrough;
+ case RESERVE_btree:
+ reserved += ca->nr_btree_reserve;
+ fallthrough;
+ case RESERVE_btree_movinggc:
+ break;
+ }
-/* Per device stats: */
+ return reserved;
+}
-struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *);
-struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);
+static inline u64 dev_buckets_free(struct bch_dev *ca,
+ struct bch_dev_usage usage,
+ enum alloc_reserve reserve)
+{
+ return max_t(s64, 0,
+ usage.d[BCH_DATA_free].buckets -
+ ca->nr_open_buckets -
+ bch2_dev_buckets_reserved(ca, reserve));
+}
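+
+/*
+ * "Free" counts only buckets that are immediately allocatable; "available"
+ * below also counts cached, need_discard and need_gc_gens buckets, which can
+ * be reclaimed once they've been invalidated or discarded:
+ */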
static inline u64 __dev_buckets_available(struct bch_dev *ca,
- struct bch_dev_usage stats)
+ struct bch_dev_usage usage,
+ enum alloc_reserve reserve)
{
return max_t(s64, 0,
- ca->mi.nbuckets - ca->mi.first_bucket -
- stats.buckets[S_META] -
- stats.buckets[S_DIRTY] -
- stats.buckets_alloc);
+ usage.d[BCH_DATA_free].buckets
+ + usage.d[BCH_DATA_cached].buckets
+ + usage.d[BCH_DATA_need_gc_gens].buckets
+ + usage.d[BCH_DATA_need_discard].buckets
+ - ca->nr_open_buckets
+ - bch2_dev_buckets_reserved(ca, reserve));
}
-/*
- * Number of reclaimable buckets - only for use by the allocator thread:
- */
-static inline u64 dev_buckets_available(struct bch_dev *ca)
+static inline u64 dev_buckets_available(struct bch_dev *ca,
+ enum alloc_reserve reserve)
{
- return __dev_buckets_available(ca, bch2_dev_usage_read(ca));
+ return __dev_buckets_available(ca, bch2_dev_usage_read(ca), reserve);
}
-static inline u64 __dev_buckets_free(struct bch_dev *ca,
- struct bch_dev_usage stats)
+/* Filesystem usage: */
+
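+/*
+ * struct bch_fs_usage ends in a flexible array of per-replicas-entry
+ * counters, so its size in u64s is only known at runtime:
+ */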
+static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
- return __dev_buckets_available(ca, stats) +
- fifo_used(&ca->free[RESERVE_NONE]) +
- fifo_used(&ca->free_inc);
+ return sizeof(struct bch_fs_usage) / sizeof(u64) +
+ READ_ONCE(c->replicas.nr);
}
-static inline u64 dev_buckets_free(struct bch_dev *ca)
+static inline unsigned dev_usage_u64s(void)
{
- return __dev_buckets_free(ca, bch2_dev_usage_read(ca));
+ return sizeof(struct bch_dev_usage) / sizeof(u64);
}
-/* Cache set stats: */
+u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);
-struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
-struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
-void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
- struct disk_reservation *, struct gc_pos);
+struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);
-struct fs_usage_sum {
- u64 data;
- u64 reserved;
-};
+void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);
-static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
-{
- struct fs_usage_sum sum = { 0 };
- unsigned i;
+void bch2_fs_usage_to_text(struct printbuf *,
+ struct bch_fs *, struct bch_fs_usage_online *);
- for (i = 0; i < BCH_REPLICAS_MAX; i++) {
- sum.data += (stats.s[i].data[S_META] +
- stats.s[i].data[S_DIRTY]) * (i + 1);
- sum.reserved += stats.s[i].persistent_reserved * (i + 1);
- }
+u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);
- sum.reserved += stats.online_reserved;
- return sum;
-}
+struct bch_fs_usage_short
+bch2_fs_usage_read_short(struct bch_fs *);
-static inline u64 __bch2_fs_sectors_used(struct bch_fs *c)
-{
- struct fs_usage_sum sum = __fs_usage_sum(__bch2_fs_usage_read(c));
+/* key/bucket marking: */
- return sum.data + sum.reserved + (sum.reserved >> 7);
-}
+void bch2_fs_usage_initialize(struct bch_fs *);
+
+int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
+ size_t, enum bch_data_type, unsigned,
+ struct gc_pos, unsigned);
+
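+/*
+ * bch2_mark_*() are the in-memory triggers, also run by gc; the
+ * bch2_trans_mark_*() variants below are the transactional triggers, run
+ * when a btree update commits. Both are passed the old and new versions of
+ * the key so they can account the delta:
+ */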
+int bch2_mark_alloc(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_extent(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_stripe(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_inode(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_reservation(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_reflink_p(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
-static inline u64 bch2_fs_sectors_used(struct bch_fs *c)
+int bch2_trans_mark_extent(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_stripe(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_inode(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+
+void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
+int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
+
+int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
+ size_t, enum bch_data_type, unsigned);
+int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
+
+/* disk reservations: */
+
+static inline void bch2_disk_reservation_put(struct bch_fs *c,
+ struct disk_reservation *res)
{
- return min(c->capacity, __bch2_fs_sectors_used(c));
+ if (res->sectors) {
+ this_cpu_sub(*c->online_reserved, res->sectors);
+ res->sectors = 0;
+ }
}
-static inline bool is_available_bucket(struct bucket_mark mark)
+#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
+
+int __bch2_disk_reservation_add(struct bch_fs *,
+ struct disk_reservation *,
+ u64, int);
+
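+/*
+ * Fast path: carve @sectors out of this CPU's cached sectors_available via a
+ * local cmpxchg loop; if the per-cpu pool can't cover the request, punt to
+ * the out-of-line __bch2_disk_reservation_add(), which refills from the
+ * global counters and honours the flags:
+ */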
+static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+ u64 sectors, int flags)
{
- return (!mark.owned_by_allocator &&
- mark.data_type == BUCKET_DATA &&
- !mark.dirty_sectors &&
- !mark.nouse);
+#ifdef __KERNEL__
+ u64 old, new;
+
+ do {
+ old = this_cpu_read(c->pcpu->sectors_available);
+ if (sectors > old)
+ return __bch2_disk_reservation_add(c, res, sectors, flags);
+
+ new = old - sectors;
+ } while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);
+
+ this_cpu_add(*c->online_reserved, sectors);
+ res->sectors += sectors;
+ return 0;
+#else
+ return __bch2_disk_reservation_add(c, res, sectors, flags);
+#endif
}
-static inline bool bucket_needs_journal_commit(struct bucket_mark m,
- u16 last_seq_ondisk)
+static inline struct disk_reservation
+bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
- return m.journal_seq_valid &&
- ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
+ return (struct disk_reservation) {
+ .sectors = 0,
+#if 0
+ /* not used yet: */
+ .gen = c->capacity_gen,
+#endif
+ .nr_replicas = nr_replicas,
+ };
}
-void bch2_bucket_seq_cleanup(struct bch_fs *);
-
-bool bch2_invalidate_bucket(struct bch_dev *, struct bucket *,
- struct bucket_mark *);
-bool bch2_mark_alloc_bucket_startup(struct bch_dev *, struct bucket *);
-void bch2_mark_free_bucket(struct bch_dev *, struct bucket *);
-void bch2_mark_alloc_bucket(struct bch_dev *, struct bucket *, bool);
-void bch2_mark_metadata_bucket(struct bch_dev *, struct bucket *,
- enum bucket_data_type, bool);
-
-#define BCH_BUCKET_MARK_NOATOMIC (1 << 0)
-#define BCH_BUCKET_MARK_GC_WILL_VISIT (1 << 1)
-#define BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE (1 << 2)
-
-void __bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
- struct bch_fs_usage *, u64, unsigned);
+static inline int bch2_disk_reservation_get(struct bch_fs *c,
+ struct disk_reservation *res,
+ u64 sectors, unsigned nr_replicas,
+ int flags)
+{
+ *res = bch2_disk_reservation_init(c, nr_replicas);
-void bch2_gc_mark_key(struct bch_fs *, struct bkey_s_c,
- s64, bool, unsigned);
-void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
- struct gc_pos, struct bch_fs_usage *, u64);
+ return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
+}
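+
+/*
+ * Typical usage - a sketch, with error handling mostly elided:
+ *
+ *	struct disk_reservation res;
+ *	int ret;
+ *
+ *	ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
+ *	if (ret)
+ *		return ret;
+ *
+ *	(do the write the reservation covers)
+ *
+ *	bch2_disk_reservation_put(c, &res);
+ */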
-void bch2_recalc_sectors_available(struct bch_fs *);
+#define RESERVE_FACTOR 6
-void bch2_disk_reservation_put(struct bch_fs *,
- struct disk_reservation *);
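+/*
+ * avail_factor() scales capacity by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1)
+ * (i.e. 64/65), holding back ~1.5% so we report "full" slightly before the
+ * device physically is:
+ */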
+static inline u64 avail_factor(u64 r)
+{
+ return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
+}
-#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
-#define BCH_DISK_RESERVATION_METADATA (1 << 1)
-#define BCH_DISK_RESERVATION_GC_LOCK_HELD (1 << 2)
-#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD (1 << 3)
-
-int bch2_disk_reservation_add(struct bch_fs *,
- struct disk_reservation *,
- unsigned, int);
-int bch2_disk_reservation_get(struct bch_fs *,
- struct disk_reservation *,
- unsigned, int);
+int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
+void bch2_dev_buckets_free(struct bch_dev *);
+int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);
#endif /* _BUCKETS_H */