X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbuckets.h;h=2c95cc5d86be661c6d6a0783d366d5d8b8b919d7;hb=b5fd066153c40a70a29caa1ea7987723ab687763;hp=392e03d4c319e8039bf30c4eea4896c1f047b0e6;hpb=d34e731082d8fcd710c2af6377a3b7fa927c8451;p=bcachefs-tools-debian

diff --git a/libbcachefs/buckets.h b/libbcachefs/buckets.h
index 392e03d..2c95cc5 100644
--- a/libbcachefs/buckets.h
+++ b/libbcachefs/buckets.h
@@ -9,58 +9,93 @@
 #define _BUCKETS_H
 
 #include "buckets_types.h"
-#include "super.h"
+#include "extents.h"
+#include "sb-members.h"
+
+static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
+{
+	return div_u64(s, ca->mi.bucket_size);
+}
+
+static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
+{
+	return ((sector_t) b) * ca->mi.bucket_size;
+}
+
+static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
+{
+	u32 remainder;
+
+	div_u64_rem(s, ca->mi.bucket_size, &remainder);
+	return remainder;
+}
+
+static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
+						 u32 *offset)
+{
+	return div_u64_rem(s, ca->mi.bucket_size, offset);
+}
 
 #define for_each_bucket(_b, _buckets)					\
 	for (_b = (_buckets)->b + (_buckets)->first_bucket;		\
 	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
 
-#define bucket_cmpxchg(g, new, expr)					\
-({									\
-	struct bucket *_g = g;						\
-	u64 _v = atomic64_read(&(g)->_mark.v);				\
-	struct bucket_mark _old;					\
-									\
-	do {								\
-		(new).v.counter = _old.v.counter = _v;			\
-		expr;							\
-	} while ((_v = atomic64_cmpxchg(&(_g)->_mark.v,			\
-					_old.v.counter,			\
-					(new).v.counter)) != _old.v.counter);\
-	_old;								\
-})
+/*
+ * Ugly hack alert:
+ *
+ * We need to cram a spinlock in a single byte, because that's what we have left
+ * in struct bucket, and we care about the size of these - during fsck, we need
+ * in memory state for every single bucket on every device.
+ *
+ * We used to do
+ *	while (xchg(&b->lock, 1)) cpu_relax();
+ * but, it turns out not all architectures support xchg on a single byte.
+ *
+ * So now we use bit_spin_lock(), with fun games since we can't burn a whole
+ * ulong for this - we just need to make sure the lock bit always ends up in the
+ * first byte.
+ */
 
-static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
-						  bool gc)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BUCKET_LOCK_BITNR	0
+#else
+#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
+#endif
+
+union ulong_byte_assert {
+	ulong	ulong;
+	u8	byte;
+};
+
+static inline void bucket_unlock(struct bucket *b)
 {
-	return rcu_dereference_check(ca->buckets[gc],
-				     !ca->fs ||
-				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
-				     lockdep_is_held(&ca->fs->gc_lock) ||
-				     lockdep_is_held(&ca->bucket_lock));
+	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
+
+	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
+	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
 }
 
-static inline struct bucket_array *bucket_array(struct bch_dev *ca)
+static inline void bucket_lock(struct bucket *b)
 {
-	return __bucket_array(ca, false);
+	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
+			 TASK_UNINTERRUPTIBLE);
 }
 
-static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
+static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
 {
-	struct bucket_array *buckets = __bucket_array(ca, gc);
-
-	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
-	return buckets->b + b;
+	return rcu_dereference_check(ca->buckets_gc,
+				     !ca->fs ||
+				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
+				     lockdep_is_held(&ca->fs->gc_lock) ||
+				     lockdep_is_held(&ca->bucket_lock));
 }
 
 static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
 {
-	return __bucket(ca, b, true);
-}
+	struct bucket_array *buckets = gc_bucket_array(ca);
 
-static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
-{
-	return __bucket(ca, b, false);
+	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
+	return buckets->b + b;
 }
 
 static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
@@ -70,7 +105,6 @@ static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
 				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
 				     lockdep_is_held(&ca->fs->gc_lock) ||
 				     lockdep_is_held(&ca->bucket_lock));
-
 }
 
 static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
@@ -81,20 +115,27 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
 	return gens->b + b;
 }
 
-/*
- * bucket_gc_gen() returns the difference between the bucket's current gen and
- * the oldest gen of any pointer into that bucket in the btree.
- */
-
-static inline u8 bucket_gc_gen(struct bucket *g)
+static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
+				   const struct bch_extent_ptr *ptr)
 {
-	return g->mark.gen - g->oldest_gen;
+	return sector_to_bucket(ca, ptr->offset);
 }
 
-static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
+static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
 				   const struct bch_extent_ptr *ptr)
 {
-	return sector_to_bucket(ca, ptr->offset);
+	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
+}
+
+static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
+						const struct bch_extent_ptr *ptr,
+						u32 *bucket_offset)
+{
+	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
 }
 
 static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
@@ -106,13 +147,22 @@ static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
 static inline enum bch_data_type ptr_data_type(const struct bkey *k,
 					       const struct bch_extent_ptr *ptr)
 {
-	if (k->type == KEY_TYPE_btree_ptr ||
-	    k->type == KEY_TYPE_btree_ptr_v2)
+	if (bkey_is_btree_ptr(k))
 		return BCH_DATA_btree;
 
 	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
 }
 
+static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
+{
+	EBUG_ON(sectors < 0);
+
+	return crc_is_compressed(p.crc)
+		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
+				   p.crc.uncompressed_size)
+		: sectors;
+}
+
 static inline int gen_cmp(u8 a, u8 b)
 {
 	return (s8) (a - b);
@@ -141,64 +191,96 @@ static inline u8 ptr_stale(struct bch_dev *ca,
 	return ret;
 }
 
-/* bucket gc marks */
+/* Device usage: */
 
-static inline bool is_available_bucket(struct bucket_mark mark)
+void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
+static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
 {
-	return !mark.dirty_sectors && !mark.stripe;
-}
+	struct bch_dev_usage ret;
 
-/* Device usage: */
+	bch2_dev_usage_read_fast(ca, &ret);
+	return ret;
+}
 
-struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);
+void bch2_dev_usage_init(struct bch_dev *);
+void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *);
 
-static inline u64 __dev_buckets_available(struct bch_dev *ca,
-					  struct bch_dev_usage stats)
+static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
 {
-	u64 total = ca->mi.nbuckets - ca->mi.first_bucket;
-
-	if (WARN_ONCE(stats.buckets_unavailable > total,
-		      "buckets_unavailable overflow (%llu > %llu)\n",
-		      stats.buckets_unavailable, total))
-		return 0;
-
-	return total - stats.buckets_unavailable;
+	s64 reserved = 0;
+
+	switch (watermark) {
+	case BCH_WATERMARK_NR:
+		BUG();
+	case BCH_WATERMARK_stripe:
+		reserved += ca->mi.nbuckets >> 6;
+		fallthrough;
+	case BCH_WATERMARK_normal:
+		reserved += ca->mi.nbuckets >> 6;
+		fallthrough;
+	case BCH_WATERMARK_copygc:
+		reserved += ca->nr_btree_reserve;
+		fallthrough;
+	case BCH_WATERMARK_btree:
+		reserved += ca->nr_btree_reserve;
+		fallthrough;
+	case BCH_WATERMARK_btree_copygc:
+	case BCH_WATERMARK_reclaim:
+		break;
+	}
+
+	return reserved;
 }
 
-static inline u64 dev_buckets_available(struct bch_dev *ca)
+static inline u64 dev_buckets_free(struct bch_dev *ca,
+				   struct bch_dev_usage usage,
+				   enum bch_watermark watermark)
 {
-	return __dev_buckets_available(ca, bch2_dev_usage_read(ca));
+	return max_t(s64, 0,
+		     usage.d[BCH_DATA_free].buckets -
+		     ca->nr_open_buckets -
+		     bch2_dev_buckets_reserved(ca, watermark));
 }
 
-static inline u64 __dev_buckets_reclaimable(struct bch_dev *ca,
-					    struct bch_dev_usage stats)
+static inline u64 __dev_buckets_available(struct bch_dev *ca,
+					  struct bch_dev_usage usage,
+					  enum bch_watermark watermark)
 {
-	struct bch_fs *c = ca->fs;
-	s64 available = __dev_buckets_available(ca, stats);
-	unsigned i;
-
-	spin_lock(&c->freelist_lock);
-	for (i = 0; i < RESERVE_NR; i++)
-		available -= fifo_used(&ca->free[i]);
-	available -= fifo_used(&ca->free_inc);
-	available -= ca->nr_open_buckets;
-	spin_unlock(&c->freelist_lock);
-
-	return max(available, 0LL);
+	return max_t(s64, 0,
+		       usage.d[BCH_DATA_free].buckets
+		     + usage.d[BCH_DATA_cached].buckets
+		     + usage.d[BCH_DATA_need_gc_gens].buckets
+		     + usage.d[BCH_DATA_need_discard].buckets
+		     - ca->nr_open_buckets
+		     - bch2_dev_buckets_reserved(ca, watermark));
 }
 
-static inline u64 dev_buckets_reclaimable(struct bch_dev *ca)
+static inline u64 dev_buckets_available(struct bch_dev *ca,
+					enum bch_watermark watermark)
 {
-	return __dev_buckets_reclaimable(ca, bch2_dev_usage_read(ca));
+	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
 }
 
 /* Filesystem usage: */
 
+static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
+{
+	return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
+}
+
 static inline unsigned fs_usage_u64s(struct bch_fs *c)
 {
+	return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
+}
+
+static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
+{
+	return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
+}
 
-	return sizeof(struct bch_fs_usage) / sizeof(u64) +
-		READ_ONCE(c->replicas.nr);
+static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
+{
+	return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
 }
 
 static inline unsigned dev_usage_u64s(void)
@@ -220,77 +302,127 @@ u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);
 
 struct bch_fs_usage_short
 bch2_fs_usage_read_short(struct bch_fs *);
 
-/* key/bucket marking: */
-
-void bch2_fs_usage_initialize(struct bch_fs *);
-
-void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *, size_t, bool);
-void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
-			       size_t, enum bch_data_type, unsigned,
-			       struct gc_pos, unsigned);
-
-int bch2_mark_alloc(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
-int bch2_mark_extent(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
-int bch2_mark_stripe(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
-int bch2_mark_inode(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
-int bch2_mark_reservation(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
-int bch2_mark_reflink_p(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
-
-int bch2_trans_mark_extent(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
-int bch2_trans_mark_stripe(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
-int bch2_trans_mark_inode(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
-int bch2_trans_mark_reservation(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
-int bch2_trans_mark_reflink_p(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
-
-int bch2_mark_key(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+void bch2_dev_usage_update(struct bch_fs *, struct bch_dev *,
+			   const struct bch_alloc_v4 *,
+			   const struct bch_alloc_v4 *, u64, bool);
+void bch2_dev_usage_update_m(struct bch_fs *, struct bch_dev *,
+			     struct bucket *, struct bucket *);
 
-int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
-			struct bkey_i *, unsigned);
+/* key/bucket marking: */
 
-static inline int bch2_trans_mark_old(struct btree_trans *trans,
-				      struct bkey_s_c old, unsigned flags)
+static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
+						unsigned journal_seq,
+						bool gc)
 {
-	struct bkey_i deleted;
-
-	bkey_init(&deleted.k);
-	deleted.k.p = old.k->p;
+	percpu_rwsem_assert_held(&c->mark_lock);
+	BUG_ON(!gc && !journal_seq);
 
-	return bch2_trans_mark_key(trans, old, &deleted,
-				   BTREE_TRIGGER_OVERWRITE|flags);
+	return this_cpu_ptr(gc
+			    ? c->usage_gc
+			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
 }
 
-static inline int bch2_trans_mark_new(struct btree_trans *trans,
-				      struct bkey_i *new, unsigned flags)
-{
-	struct bkey_i deleted;
+int bch2_update_replicas(struct bch_fs *, struct bkey_s_c,
+			 struct bch_replicas_entry_v1 *, s64,
+			 unsigned, bool);
+int bch2_update_replicas_list(struct btree_trans *,
+			      struct bch_replicas_entry_v1 *, s64);
+int bch2_update_cached_sectors_list(struct btree_trans *, unsigned, s64);
+int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);
 
-	bkey_init(&deleted.k);
-	deleted.k.p = new->k.p;
+void bch2_fs_usage_initialize(struct bch_fs *);
 
-	return bch2_trans_mark_key(trans, bkey_i_to_s_c(&deleted), new,
-				   BTREE_TRIGGER_INSERT|flags);
-}
+int bch2_check_bucket_ref(struct btree_trans *, struct bkey_s_c,
+			  const struct bch_extent_ptr *,
+			  s64, enum bch_data_type, u8, u8, u32);
+
+int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
+			      size_t, enum bch_data_type, unsigned,
+			      struct gc_pos, unsigned);
+
+int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
+			struct bkey_s_c, struct bkey_s, unsigned);
+int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
+			     struct bkey_s_c, struct bkey_s, unsigned);
+
+#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
+({												\
+	int ret = 0;										\
+												\
+	if (_old.k->type)									\
+		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT);	\
+	if (!ret && _new.k->type)								\
+		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_OVERWRITE);\
+	ret;											\
+})
 
+void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
 int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
 
 int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
 				    size_t, enum bch_data_type, unsigned);
 int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
+int bch2_trans_mark_dev_sbs(struct bch_fs *);
+
+static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
+{
+	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
+	u64 b_offset	= bucket_to_sector(ca, b);
+	u64 b_end	= bucket_to_sector(ca, b + 1);
+	unsigned i;
+
+	if (!b)
+		return true;
+
+	for (i = 0; i < layout->nr_superblocks; i++) {
+		u64 offset = le64_to_cpu(layout->sb_offset[i]);
+		u64 end = offset + (1 << layout->sb_max_size_bits);
+
+		if (!(offset >= b_end || end <= b_offset))
+			return true;
+	}
+
+	return false;
+}
 
 /* disk reservations: */
 
 static inline void bch2_disk_reservation_put(struct bch_fs *c,
 					     struct disk_reservation *res)
 {
-	this_cpu_sub(*c->online_reserved, res->sectors);
-	res->sectors = 0;
+	if (res->sectors) {
+		this_cpu_sub(*c->online_reserved, res->sectors);
+		res->sectors = 0;
+	}
 }
 
 #define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)
 
-int bch2_disk_reservation_add(struct bch_fs *,
-			      struct disk_reservation *,
-			      u64, int);
+int __bch2_disk_reservation_add(struct bch_fs *,
+				struct disk_reservation *,
+				u64, int);
+
+static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+					    u64 sectors, int flags)
+{
+#ifdef __KERNEL__
+	u64 old, new;
+
+	do {
+		old = this_cpu_read(c->pcpu->sectors_available);
+		if (sectors > old)
+			return __bch2_disk_reservation_add(c, res, sectors, flags);
+
+		new = old - sectors;
+	} while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);
+
+	this_cpu_add(*c->online_reserved, sectors);
+	res->sectors += sectors;
+	return 0;
+#else
+	return __bch2_disk_reservation_add(c, res, sectors, flags);
+#endif
+}
 
 static inline struct disk_reservation
 bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
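
Notes on the trickier pieces of this diff follow.

The bit-spinlock hack deserves a closer look: the BUILD_BUG_ON in bucket_unlock() verifies at compile time that 1UL << BUCKET_LOCK_BITNR sets a bit in the byte at the base address of the unsigned long, since that is the only byte that actually overlays the one-byte lock field. That means bit 0 on little-endian, and the top bit on big-endian, where the most significant byte is stored first. A minimal user-space version of the same check (illustrative only, not part of the patch; it relies on the GCC/Clang __BYTE_ORDER__ macros):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	unsigned long	ulong;
	unsigned char	byte;
};

int main(void)
{
	/* The lock bit must land in the byte at the base address of the long. */
	union ulong_byte_assert u = { .ulong = 1UL << BUCKET_LOCK_BITNR };

	assert(u.byte != 0);
	printf("lock bit lives in the first byte: 0x%02x\n", u.byte);
	return 0;
}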
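The new ptr_disk_sectors() converts live extent sectors to on-disk sectors for compressed extents by scaling with the crc's compressed_size/uncompressed_size ratio, rounding up: for example, 64 live sectors of an extent whose 128 uncompressed sectors compress down to 32 occupy ceil(64 * 32 / 128) = 16 sectors on disk (illustrative numbers). A self-contained check of that arithmetic, where div_round_up() is a stand-in for the kernel's DIV_ROUND_UP_ULL():

#include <assert.h>
#include <stdint.h>

/* Stand-in for the kernel's DIV_ROUND_UP_ULL(): ceil(n / d). */
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* 64 live sectors; the crc says 128 sectors compress to 32. */
	assert(div_round_up(64 * 32, 128) == 16);

	/* Rounding up matters when the division isn't exact: 65 live -> 17. */
	assert(div_round_up(65 * 32, 128) == 17);
	return 0;
}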
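The fallthrough switch in bch2_dev_buckets_reserved() makes each watermark's reserve a superset of the reserves of all more privileged watermarks: reclaim and btree_copygc reserve nothing, btree adds one ca->nr_btree_reserve, copygc a second one, normal adds nbuckets/64 on top of that, and stripe a second nbuckets/64. With illustrative values nbuckets = 1 << 20 and nr_btree_reserve = 512, that comes to 0, 512, 1024, 17408 and 33792 reserved buckets respectively, so ordinary allocations run out of buckets first and the remainder stays usable for btree updates and copygc.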
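is_superblock_bucket() treats the bucket and each superblock copy as half-open sector ranges and applies the usual interval test: [a, b) and [c, d) overlap iff !(c >= b || d <= a). A small self-contained check, where ranges_overlap() is a hypothetical helper rather than anything from the patch:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Half-open interval overlap, as used by is_superblock_bucket() above. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
			   uint64_t b_start, uint64_t b_end)
{
	return !(a_start >= b_end || a_end <= b_start);
}

int main(void)
{
	/* Bucket covering sectors [1024, 1152), superblock copy at [1096, 1104). */
	assert(ranges_overlap(1096, 1104, 1024, 1152));

	/* Touching end-to-start is not an overlap with half-open ranges. */
	assert(!ranges_overlap(1152, 1160, 1024, 1152));
	return 0;
}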
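Finally, the now-inline bch2_disk_reservation_add() is a fast path/slow path split: it takes sectors out of a per-CPU cache (c->pcpu->sectors_available) with a this_cpu_cmpxchg() loop, and only falls back to the out-of-line __bch2_disk_reservation_add(), which touches shared state, when the cache cannot cover the request. A sketch of the same pattern using C11 atomics in place of the kernel's this_cpu ops; cache_available and pool_available are hypothetical stand-ins, and the real slow path does considerably more (refilling the per-CPU cache, honoring BCH_DISK_RESERVATION_NOFAIL, accounting in res->sectors):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t cache_available = 1024;		/* plays c->pcpu->sectors_available */
static _Atomic uint64_t pool_available = 1 << 20;	/* plays the shared pool */

/* Slow path: take the sectors from the shared pool (grossly simplified). */
static bool reservation_add_slowpath(uint64_t sectors)
{
	uint64_t old = atomic_load(&pool_available);

	do {
		if (old < sectors)
			return false;	/* -ENOSPC in the real code */
	} while (!atomic_compare_exchange_weak(&pool_available, &old, old - sectors));

	return true;
}

/* Fast path mirroring bch2_disk_reservation_add(): try the local cache first. */
static bool reservation_add(uint64_t sectors)
{
	uint64_t old = atomic_load(&cache_available);

	do {
		if (sectors > old)
			return reservation_add_slowpath(sectors);
	} while (!atomic_compare_exchange_weak(&cache_available, &old, old - sectors));

	return true;
}

int main(void)
{
	return reservation_add(64) ? 0 : 1;
}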