/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

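/*
 * Example usage (illustrative only; @ca is a hypothetical struct bch_dev *).
 * The caller must hold one of the locks checked by __bucket_array() below so
 * the bucket array can't be resized out from under it:
 *
 *	struct bucket_array *buckets = bucket_array(ca);
 *	struct bucket *g;
 *
 *	for_each_bucket(g, buckets)
 *		pr_info("bucket gen %u\n", g->mark.gen);
 */
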
#define bucket_cmpxchg(g, new, expr)				\
({								\
	struct bucket *_g = g;					\
	u64 _v = atomic64_read(&(g)->_mark.v);			\
	struct bucket_mark _old;				\
								\
	do {							\
		(new).v.counter = _old.v.counter = _v;		\
		expr;						\
	} while ((_v = atomic64_cmpxchg(&(_g)->_mark.v,		\
			       _old.v.counter,			\
			       (new).v.counter)) != _old.v.counter);\
	_old;							\
})

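/*
 * Example usage (illustrative only; @g is a hypothetical struct bucket *).
 * "expr" is re-evaluated against a fresh copy of the mark in "new" on every
 * cmpxchg retry, and the pre-update mark is returned:
 *
 *	struct bucket_mark old, new;
 *
 *	old = bucket_cmpxchg(g, new, ({
 *		new.owned_by_allocator = true;
 *	}));
 */
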
static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
						  bool gc)
{
	return rcu_dereference_check(ca->buckets[gc],
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
	return __bucket_array(ca, false);
}

static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
	struct bucket_array *buckets = __bucket_array(ca, gc);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
	return __bucket(ca, b, false);
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bucket *g)
{
	return g->mark.gen - g->oldest_gen;
}

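/*
 * Worked example (illustrative): gens are u8 and wrap modulo 256, so with
 * mark.gen == 3 and oldest_gen == 250, bucket_gc_gen() returns
 * (u8) (3 - 250) == 9 - the bucket's gen has been bumped 9 times since the
 * oldest pointer into it was created.
 */
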
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
					const struct bch_extent_ptr *ptr,
					bool gc)
{
	return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (k->type == KEY_TYPE_btree_ptr ||
	    k->type == KEY_TYPE_btree_ptr_v2)
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
						 const struct bch_extent_ptr *ptr)
{
	struct bucket_mark m;

	rcu_read_lock();
	m = READ_ONCE(PTR_BUCKET(ca, ptr, 0)->mark);
	rcu_read_unlock();

	return m;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

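/*
 * Worked example (illustrative): the comparison is done in signed 8 bit
 * arithmetic so that wraparound is handled correctly. With a == 2 and
 * b == 250, (s8) (2 - 250) == 8, so gen_cmp() is positive and
 * gen_after(2, 250) == 8: gen 2 is newer than gen 250 despite being
 * numerically smaller.
 */
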
/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}

static inline s64 __ptr_disk_sectors(struct extent_ptr_decoded p,
				     unsigned live_size)
{
	return live_size && p.crc.compression_type
		? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
				       p.crc.uncompressed_size))
		: live_size;
}

static inline s64 ptr_disk_sectors(struct extent_ptr_decoded p)
{
	return __ptr_disk_sectors(p, p.crc.live_size);
}

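/*
 * Worked example (illustrative): for a compressed extent with
 * crc.live_size == 8, crc.compressed_size == 32 and
 * crc.uncompressed_size == 64, disk sectors used is
 * DIV_ROUND_UP(8 * 32, 64) == 4: the live portion scaled by the compression
 * ratio, with max() ensuring a nonzero extent never rounds down to 0.
 */
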
/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
	return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
	return !mark.dirty_sectors && !mark.stripe;
}

static inline bool bucket_needs_journal_commit(struct bucket_mark m,
					       u16 last_seq_ondisk)
{
	return m.journal_seq_valid &&
		((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}

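/*
 * Worked example (illustrative): journal sequence numbers are compared in
 * truncated signed 16 bit arithmetic so they may wrap. With
 * m.journal_seq == 3 and last_seq_ondisk == 65533,
 * (s16) 3 - (s16) 65533 == 3 - (-3) == 6 > 0: the bucket was marked after
 * the last journal write that made it to disk, so it still needs a commit.
 */
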
/* Device usage: */

struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage stats)
{
	u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

	if (WARN_ONCE(stats.buckets_unavailable > total,
		      "buckets_unavailable overflow (%llu > %llu)\n",
		      stats.buckets_unavailable, total))
		return 0;

	return total - stats.buckets_unavailable;
}

static inline u64 dev_buckets_available(struct bch_dev *ca)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca));
}

static inline u64 __dev_buckets_reclaimable(struct bch_dev *ca,
					    struct bch_dev_usage stats)
{
	struct bch_fs *c = ca->fs;
	s64 available = __dev_buckets_available(ca, stats);
	unsigned i;

	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++)
		available -= fifo_used(&ca->free[i]);
	available -= fifo_used(&ca->free_inc);
	available -= ca->nr_open_buckets;
	spin_unlock(&c->freelist_lock);

	return max(available, 0LL);
}

static inline u64 dev_buckets_reclaimable(struct bch_dev *ca)
{
	return __dev_buckets_reclaimable(ca, bch2_dev_usage_read(ca));
}

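/*
 * Note: "available" above counts buckets with no live data, while
 * "reclaimable" additionally excludes buckets already sitting on the
 * freelists or held open by the allocator - i.e. what invalidating buckets
 * could still add to the free pool.
 */
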
/* Filesystem usage: */

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) +
		READ_ONCE(c->replicas.nr);
}

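/*
 * struct bch_fs_usage ends in a flexible array with one u64 counter per
 * entry in the filesystem's replicas table, so its effective size is only
 * known at runtime. Worked example (illustrative sizes): with a 48 byte
 * fixed part and c->replicas.nr == 4, fs_usage_u64s() returns 6 + 4 == 10.
 */
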
static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
			   struct bch_fs *, struct bch_fs_usage_online *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

/* key/bucket marking: */

void bch2_bucket_seq_cleanup(struct bch_fs *);
void bch2_fs_usage_initialize(struct bch_fs *);

void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *, size_t, bool);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			       size_t, enum bch_data_type, unsigned,
			       struct gc_pos, unsigned);

int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned,
		  s64, struct bch_fs_usage *, u64, unsigned);

int bch2_mark_update(struct btree_trans *, struct btree_iter *,
		     struct bkey_i *, struct bch_fs_usage *, unsigned);

int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, struct bkey_s_c,
			unsigned, s64, unsigned);
int bch2_trans_mark_update(struct btree_trans *, struct btree_iter *iter,
			   struct bkey_i *insert, unsigned);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
				    size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	this_cpu_sub(*c->online_reserved, res->sectors);
	res->sectors = 0;
}

#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)

int bch2_disk_reservation_add(struct bch_fs *,
			      struct disk_reservation *,
			      u64, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

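/*
 * Example usage (illustrative only; @c, @sectors and @nr_replicas are
 * hypothetical). A successful reservation must eventually be returned with
 * bch2_disk_reservation_put():
 *
 *	struct disk_reservation res;
 *	int ret;
 *
 *	ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *	if (ret)
 *		return ret;
 *
 *	... do the write the space was reserved for ...
 *
 *	bch2_disk_reservation_put(c, &res);
 */
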
#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

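/*
 * Worked example (illustrative): with RESERVE_FACTOR == 6, avail_factor()
 * computes r * 64 / 65, holding back ~1.5% of capacity as a reserve:
 * avail_factor(65536) == 4194304 / 65 == 64527.
 */
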
int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */