/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->mi.first_bucket;			\
	     b < (ca)->buckets + (ca)->mi.nbuckets; b++)

#define bucket_cmpxchg(g, new, expr)				\
({								\
	u64 _v = READ_ONCE((g)->_mark.counter);			\
	struct bucket_mark _old;				\
								\
	do {							\
		(new).counter = _old.counter = _v;		\
		expr;						\
	} while ((_v = cmpxchg(&(g)->_mark.counter,		\
			       _old.counter,			\
			       (new).counter)) != _old.counter);\
	_old;							\
})

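/*
 * Illustrative usage sketch, not from the original header: the caller
 * supplies a scratch bucket_mark, mutates it in `expr`, and receives the
 * pre-update mark as the statement expression's value. A hypothetical
 * helper bumping the cached sector count might look like:
 *
 *	static inline struct bucket_mark mark_add_cached(struct bucket *g,
 *							 unsigned sectors)
 *	{
 *		struct bucket_mark new;
 *
 *		return bucket_cmpxchg(g, new, new.cached_sectors += sectors);
 *	}
 */
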
/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bch_dev *ca, struct bucket *g)
{
	unsigned long r = g - ca->buckets;
	return g->mark.gen - ca->oldest_gens[r];
}

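/*
 * Worked example (illustrative): gens are 8 bits and wrap, so with
 * g->mark.gen == 5 and oldest_gens[r] == 253, bucket_gc_gen() returns
 * (u8) (5 - 253) == 8: the bucket has been invalidated 8 times since the
 * oldest live pointer into it was created.
 */
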
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

/*
 * Returns 0 if no pointers or device offline - only for tracepoints!
 */

static inline size_t PTR_BUCKET_NR_TRACE(const struct bch_fs *c,
					 const struct bkey_i *k,
					 unsigned ptr)
{
	size_t bucket = 0;

	if (bkey_extent_is_data(&k->k)) {
		const struct bch_extent_ptr *ptr;

		extent_for_each_ptr(bkey_i_to_s_c_extent(k), ptr) {
			const struct bch_dev *ca = c->devs[ptr->dev];
			bucket = PTR_BUCKET_NR(ca, ptr);
			break;
		}
	}

	return bucket;
}

static inline struct bucket *PTR_BUCKET(const struct bch_dev *ca,
					const struct bch_extent_ptr *ptr)
{
	return ca->buckets + PTR_BUCKET_NR(ca, ptr);
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

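/*
 * Illustrative: the (s8) cast in gen_cmp() makes the comparison survive
 * wraparound - gen_cmp(1, 255) == (s8) (1 - 255) == 2, so gen 1 counts as
 * newer than gen 255; gen_after() then clamps negative results to 0.
 */
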
/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline u8 ptr_stale(const struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	return gen_after(PTR_BUCKET(ca, ptr)->mark.gen, ptr->gen);
}

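/*
 * Example (illustrative): a pointer created at gen 10 into a bucket since
 * invalidated twice (bucket gen 12) gives ptr_stale() == gen_after(12, 10)
 * == 2; any nonzero result means the pointed-to data may have been
 * overwritten.
 */
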
/* The dirty and cached sector counts saturate. If this occurs,
 * reference counting alone will not free the bucket, and a btree
 * GC must be performed. */
#define GC_MAX_SECTORS_USED ((1U << 15) - 1)

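/*
 * A minimal sketch, not from the original header, of what a saturating
 * update looks like - clamp at the limit instead of letting the bitfield
 * wrap:
 *
 *	new.dirty_sectors = min_t(unsigned, GC_MAX_SECTORS_USED,
 *				  new.dirty_sectors + sectors);
 */
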
static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
	return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
	return !mark.owned_by_allocator &&
		!mark.data_type &&
		!bucket_sectors_used(mark);
}

/* Per device stats: */

struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *);
struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage stats)
{
	return max_t(s64, 0,
		     ca->mi.nbuckets - ca->mi.first_bucket -
		     stats.buckets[S_META] -
		     stats.buckets[S_DIRTY] -
		     stats.buckets_alloc);
}

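/*
 * Worked example with made-up numbers: nbuckets == 1024, first_bucket == 16,
 * 100 metadata buckets, 500 dirty buckets and 200 buckets held by the
 * allocator: 1024 - 16 - 100 - 500 - 200 == 208 buckets available; max_t()
 * clamps the result to 0 if the counters transiently sum past nbuckets.
 */
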
/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_dev *ca)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
				     struct bch_dev_usage stats)
{
	return __dev_buckets_available(ca, stats) +
		fifo_used(&ca->free[RESERVE_NONE]) +
		fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_dev *ca)
{
	return __dev_buckets_free(ca, bch2_dev_usage_read(ca));
}

/* Cache set stats: */

struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
			 struct disk_reservation *, struct gc_pos);

struct fs_usage_sum {
	u64	data;
	u64	reserved;
};

static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
{
	struct fs_usage_sum sum = { 0 };
	unsigned i;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		sum.data += (stats.s[i].data[S_META] +
			     stats.s[i].data[S_DIRTY]) * (i + 1);
		sum.reserved += stats.s[i].persistent_reserved * (i + 1);
	}

	sum.reserved += stats.online_reserved;
	return sum;
}

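/*
 * Illustrative: data at replicas index i is stored i + 1 times, hence the
 * (i + 1) weighting - e.g. 100 sectors of 2x-replicated dirty data (i == 1)
 * contribute 200 sectors to sum.data.
 */
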
static inline u64 __bch2_fs_sectors_used(struct bch_fs *c)
{
	struct fs_usage_sum sum = __fs_usage_sum(__bch2_fs_usage_read(c));

	return sum.data + sum.reserved + (sum.reserved >> 7);
}

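/*
 * The (sum.reserved >> 7) term pads reservations by 1/128 (~0.8%),
 * presumably as slack so reserved space can't be consumed down to the last
 * sector.
 */
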
static inline u64 bch2_fs_sectors_used(struct bch_fs *c)
{
	return min(c->capacity, __bch2_fs_sectors_used(c));
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
	return (!mark.owned_by_allocator &&
		mark.data_type == BUCKET_DATA &&
		!mark.dirty_sectors &&
		!mark.nouse);
}

static inline bool bucket_needs_journal_commit(struct bucket_mark m,
					       u16 last_seq_ondisk)
{
	return m.journal_seq_valid &&
		((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}

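/*
 * Example (illustrative): journal sequence numbers are compared in 16-bit
 * wraparound arithmetic - with last_seq_ondisk == 65535 and
 * m.journal_seq == 2, (s16) 2 - (s16) 65535 == 2 - (-1) == 3 > 0, so the
 * bucket's mark postdates what's on disk and still needs a journal commit.
 */
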
void bch2_bucket_seq_cleanup(struct bch_fs *);

bool bch2_invalidate_bucket(struct bch_dev *, struct bucket *,
			    struct bucket_mark *);
bool bch2_mark_alloc_bucket_startup(struct bch_dev *, struct bucket *);
void bch2_mark_free_bucket(struct bch_dev *, struct bucket *);
void bch2_mark_alloc_bucket(struct bch_dev *, struct bucket *, bool);
void bch2_mark_metadata_bucket(struct bch_dev *, struct bucket *,
			       enum bucket_data_type, bool);

#define BCH_BUCKET_MARK_NOATOMIC		(1 << 0)
#define BCH_BUCKET_MARK_GC_WILL_VISIT		(1 << 1)
#define BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE	(1 << 2)

void __bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
		     struct bch_fs_usage *, u64, unsigned);

void bch2_gc_mark_key(struct bch_fs *, struct bkey_s_c,
		      s64, bool, unsigned);
void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
		   struct gc_pos, struct bch_fs_usage *, u64);

void bch2_recalc_sectors_available(struct bch_fs *);

void bch2_disk_reservation_put(struct bch_fs *,
			       struct disk_reservation *);

#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)
#define BCH_DISK_RESERVATION_METADATA		(1 << 1)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD	(1 << 2)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD	(1 << 3)

int bch2_disk_reservation_add(struct bch_fs *,
			      struct disk_reservation *,
			      unsigned, int);
int bch2_disk_reservation_get(struct bch_fs *,
			      struct disk_reservation *,
			      unsigned, int);

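/*
 * Hypothetical usage sketch (assumes the trailing arguments are
 * (sectors, flags)):
 *
 *	struct disk_reservation res;
 *
 *	if (!bch2_disk_reservation_get(c, &res, sectors,
 *				       BCH_DISK_RESERVATION_NOFAIL)) {
 *		... write the data covered by the reservation ...
 *		bch2_disk_reservation_put(c, &res);
 *	}
 */
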
#endif /* _BUCKETS_H */