/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"
#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
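/*
 * Example (a sketch, not a call site in this file): walking every valid
 * bucket on a device. The caller must hold one of the locks checked in
 * __bucket_array() below so the bucket array can't be resized underneath
 * it:
 *
 *	struct bucket_array *buckets = bucket_array(ca);
 *	struct bucket *g;
 *
 *	for_each_bucket(g, buckets)
 *		pr_info("bucket gen %u\n", g->mark.gen);
 */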
/*
 * Atomically update a bucket's mark via cmpxchg: @expr may modify @new;
 * retries from a fresh snapshot on concurrent modification and
 * evaluates to the old mark.
 */
#define bucket_cmpxchg(g, new, expr)				\
({								\
	struct bucket *_g = g;					\
	u64 _v = atomic64_read(&(g)->_mark.v);			\
	struct bucket_mark _old;				\
								\
	do {							\
		(new).v.counter = _old.v.counter = _v;		\
		expr;						\
	} while ((_v = atomic64_cmpxchg(&(_g)->_mark.v,		\
				_old.v.counter,			\
				(new).v.counter)) != _old.v.counter);\
	_old;							\
})
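/*
 * Typical usage (a sketch; @g and @sectors are assumed to be provided
 * by the caller):
 *
 *	struct bucket_mark old, new;
 *
 *	old = bucket_cmpxchg(g, new, ({
 *		new.dirty_sectors += sectors;
 *	}));
 */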
static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
						  bool gc)
{
	return rcu_dereference_check(ca->buckets[gc],
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}
static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
	return __bucket_array(ca, false);
}
static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
	struct bucket_array *buckets = __bucket_array(ca, gc);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}
static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
	return __bucket(ca, b, false);
}
static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
					 size_t b, int rw)
{
	bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}
static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
	return c->bucket_clock[rw].hand - g->io_time[rw];
}
/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */
static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
	struct bucket *g = bucket(ca, b);

	return g->mark.gen - g->oldest_gen;
}
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}
static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
					const struct bch_extent_ptr *ptr,
					bool gc)
{
	return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
}
static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
						 const struct bch_extent_ptr *ptr)
{
	struct bucket_mark m;

	rcu_read_lock();
	m = READ_ONCE(bucket(ca, PTR_BUCKET_NR(ca, ptr))->mark);
	rcu_read_unlock();

	return m;
}
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}
static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}
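/*
 * Worked example: gens are 8 bits and wrap, so ordering goes through a
 * signed cast rather than a plain '<':
 *
 *	gen_cmp(1, 255) == (s8) (1 - 255) == (s8) -254 ==  2
 *	gen_cmp(255, 1) == (s8) (255 - 1) == (s8)  254 == -2
 *
 * i.e. gen 1 is "after" gen 255 across the wrap: gen_after(1, 255) == 2,
 * gen_after(255, 1) == 0.
 */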
/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}
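/*
 * Illustrative: a pointer created when the bucket's gen was 3 reads as
 * stale by 2 once the bucket has been invalidated twice (bucket gen 5);
 * any nonzero return means the pointed-to data may have been reused.
 */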
static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
					  unsigned live_size)
{
	return live_size && p.crc.compression_type
		? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
				       p.crc.uncompressed_size))
		: live_size;
}
static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
{
	return __ptr_disk_sectors(p, p.crc.live_size);
}
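/*
 * Worked example (numbers illustrative): for a compressed extent with
 * crc.uncompressed_size == 128, crc.compressed_size == 32 and
 * live_size == 64, the live half of the data is charged
 *
 *	DIV_ROUND_UP(64 * 32, 128) == 16
 *
 * sectors on disk - the live fraction of the compressed size, rounded
 * up, and never less than 1 while any data is live.
 */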
/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
	return mark.dirty_sectors + mark.cached_sectors;
}
static inline bool bucket_unused(struct bucket_mark mark)
{
	return !mark.owned_by_allocator &&
		!mark.data_type &&
		!bucket_sectors_used(mark);
}
static inline bool is_available_bucket(struct bucket_mark mark)
{
	return (!mark.owned_by_allocator &&
		!mark.dirty_sectors &&
		!mark.stripe);
}
static inline bool bucket_needs_journal_commit(struct bucket_mark m,
					       u16 last_seq_ondisk)
{
	return m.journal_seq_valid &&
		((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}
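/*
 * As with gen_cmp(), the journal sequence comparison goes through a
 * signed cast so the truncated 16-bit counter may wrap: e.g.
 * m.journal_seq == 2, last_seq_ondisk == 65535 gives
 * (s16) 2 - (s16) 65535 == 2 - (-1) == 3 > 0 - the bucket's last
 * modification is still not on disk.
 */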
/* Device usage: */

struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

void bch2_dev_usage_from_buckets(struct bch_fs *, struct bch_dev *);
static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage stats)
{
	u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

	if (WARN_ONCE(stats.buckets_unavailable > total,
		      "buckets_unavailable overflow (%llu > %llu)\n",
		      stats.buckets_unavailable, total))
		return 0;

	return total - stats.buckets_unavailable;
}
/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}
static inline u64 __dev_buckets_free(struct bch_dev *ca,
				     struct bch_dev_usage stats)
{
	return __dev_buckets_available(ca, stats) +
		fifo_used(&ca->free[RESERVE_NONE]) +
		fifo_used(&ca->free_inc);
}
static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}
/* Filesystem usage: */

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) +
		READ_ONCE(c->replicas.nr);
}
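/*
 * (Assumption from context: struct bch_fs_usage ends in a flexible
 * array with one u64 per replicas-table entry, so its true size is the
 * fixed part plus c->replicas.nr u64s.) Sketch of sizing an allocation
 * with it:
 *
 *	struct bch_fs_usage *u =
 *		kzalloc(fs_usage_u64s(c) * sizeof(u64), GFP_NOFS);
 */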
static inline struct bch_fs_usage *bch2_fs_usage_get_scratch(struct bch_fs *c)
{
	struct bch_fs_usage *ret = this_cpu_ptr(c->usage_scratch);

	memset(ret, 0, fs_usage_u64s(c) * sizeof(u64));

	return ret;
}
struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);
/* key/bucket marking: */

void bch2_bucket_seq_cleanup(struct bch_fs *);
void bch2_fs_usage_initialize(struct bch_fs *);

void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			       size_t, enum bch_data_type, unsigned,
			       struct gc_pos, unsigned);
#define BCH_BUCKET_MARK_GC		(1 << 0)
#define BCH_BUCKET_MARK_NOATOMIC	(1 << 1)
int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
			 bool, s64, struct gc_pos,
			 struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
		  bool, s64, struct gc_pos,
		  struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
			struct disk_reservation *);
/* disk reservations: */

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors)
		__bch2_disk_reservation_put(c, res);
}
#define BCH_DISK_RESERVATION_NOFAIL	(1 << 0)

int bch2_disk_reservation_add(struct bch_fs *,
			      struct disk_reservation *,
			      unsigned, int);
static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}
static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    unsigned sectors,
					    unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
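/*
 * Usage sketch (illustrative, not a call site in this file): reserve
 * space for a write, then release it when done:
 *
 *	struct disk_reservation res;
 *	int ret = bch2_disk_reservation_get(c, &res, sectors,
 *					    nr_replicas, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	bch2_disk_reservation_put(c, &res);
 */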
int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);
#endif /* _BUCKETS_H */