/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */
10 #include "buckets_types.h"
/*
 * for_each_bucket() - iterate @b over every usable bucket on device @ca.
 *
 * @b:  struct bucket * cursor, advanced each iteration
 * @ca: device whose buckets[] array is walked
 *
 * Buckets below mi.first_bucket are skipped (reserved region before the
 * first usable bucket).
 *
 * Fix: the @b parameter is now parenthesized everywhere it is expanded;
 * previously an argument such as *p would have expanded to *p++ in the
 * increment expression, which increments the pointer rather than the
 * pointed-to cursor.
 */
#define for_each_bucket(b, ca)						\
	for ((b) = (ca)->buckets + (ca)->mi.first_bucket;		\
	     (b) < (ca)->buckets + (ca)->mi.nbuckets; (b)++)
/*
 * bucket_cmpxchg() - atomically update a bucket's mark.
 *
 * Snapshots the packed mark counter with READ_ONCE(), lets @expr edit the
 * copy in @new, then attempts to publish it with cmpxchg(); the loop retries
 * whenever another thread changed the mark in between.
 *
 * NOTE(review): lines are missing from this extract — the do { ... opener of
 * the retry loop, the @expr statement, and the _old.counter argument to
 * cmpxchg() are not visible. Restore the full macro from the canonical
 * source before relying on this text.
 */
#define bucket_cmpxchg(g, new, expr)				\
	u64 _v = READ_ONCE((g)->_mark.counter);			\
	struct bucket_mark _old;				\
	(new).counter = _old.counter = _v;			\
	} while ((_v = cmpxchg(&(g)->_mark.counter,		\
		       (new).counter)) != _old.counter);\
32 * bucket_gc_gen() returns the difference between the bucket's current gen and
33 * the oldest gen of any pointer into that bucket in the btree.
36 static inline u8 bucket_gc_gen(struct cache *ca, struct bucket *g)
38 unsigned long r = g - ca->buckets;
39 return g->mark.gen - ca->oldest_gens[r];
42 static inline struct cache *PTR_CACHE(const struct cache_set *c,
43 const struct bch_extent_ptr *ptr)
45 EBUG_ON(ptr->dev > rcu_dereference(c->members)->nr_devices);
47 return rcu_dereference(c->cache[ptr->dev]);
50 static inline size_t PTR_BUCKET_NR(const struct cache *ca,
51 const struct bch_extent_ptr *ptr)
53 return sector_to_bucket(ca, ptr->offset);
/*
 * Returns 0 if no pointers or device offline - only for tracepoints!
 *
 * NOTE(review): lines are missing from this extract — the final parameter,
 * the declaration/initialization of 'bucket', the loop exit and the return
 * are not visible. Presumably it returns the bucket number of the first
 * online pointer; confirm against the full source.
 */
static inline size_t PTR_BUCKET_NR_TRACE(const struct cache_set *c,
					 const struct bkey_i *k,
	/* Only extents carry device pointers worth translating. */
	if (bkey_extent_is_data(&k->k)) {
		const struct bch_extent_ptr *ptr;
		const struct cache *ca;

		extent_for_each_online_device(c, bkey_i_to_s_c_extent(k), ptr, ca) {
			bucket = PTR_BUCKET_NR(ca, ptr);
80 static inline struct bucket *PTR_BUCKET(const struct cache *ca,
81 const struct bch_extent_ptr *ptr)
83 return ca->buckets + PTR_BUCKET_NR(ca, ptr);
86 static inline u8 __gen_after(u8 a, u8 b)
90 return r > 128U ? 0 : r;
93 static inline u8 gen_after(u8 a, u8 b)
103 * ptr_stale() - check if a pointer points into a bucket that has been
106 * Warning: PTR_CACHE(c, k, ptr) must equal ca.
108 static inline u8 ptr_stale(const struct cache *ca,
109 const struct bch_extent_ptr *ptr)
111 return gen_after(PTR_BUCKET(ca, ptr)->mark.gen, ptr->gen);
116 static inline bool bucket_min_cmp(struct bucket_heap_entry l,
117 struct bucket_heap_entry r)
119 return l.val < r.val;
122 static inline bool bucket_max_cmp(struct bucket_heap_entry l,
123 struct bucket_heap_entry r)
125 return l.val > r.val;
/*
 * Track bucket @g in ca->heap, keyed by the given value: add it while the
 * heap has room, otherwise replace the current minimum when this entry
 * beats it (bucket_min_cmp) and re-sift — i.e. the heap retains the
 * largest-valued buckets seen.
 *
 * NOTE(review): the tail of the parameter list (presumably the 'val'
 * parameter) and the closing braces are not visible in this extract.
 */
static inline void bucket_heap_push(struct cache *ca, struct bucket *g,
	struct bucket_heap_entry new = { g, val };

	if (!heap_full(&ca->heap))
		heap_add(&ca->heap, new, bucket_min_cmp);
	else if (bucket_min_cmp(new, heap_peek(&ca->heap))) {
		ca->heap.data[0] = new;
		heap_sift(&ca->heap, 0, bucket_min_cmp);
141 /* bucket gc marks */
143 /* The dirty and cached sector counts saturate. If this occurs,
144 * reference counting alone will not free the bucket, and a btree
145 * GC must be performed. */
146 #define GC_MAX_SECTORS_USED ((1U << 15) - 1)
148 static inline bool bucket_unused(struct bucket *g)
150 return !g->mark.counter;
153 static inline unsigned bucket_sectors_used(struct bucket *g)
155 return g->mark.dirty_sectors + g->mark.cached_sectors;
158 /* Per device stats: */
160 struct bucket_stats_cache __bch_bucket_stats_read_cache(struct cache *);
161 struct bucket_stats_cache bch_bucket_stats_read_cache(struct cache *);
/*
 * Buckets on @ca that are candidates for reclaim: total usable buckets
 * minus those counted as dirty or allocated (and, presumably, further
 * terms not visible here).
 *
 * NOTE(review): the head of the return expression and its final term are
 * missing from this extract — confirm against the full source (likely a
 * clamp-at-zero around the subtraction).
 */
static inline u64 __buckets_available_cache(struct cache *ca,
					    struct bucket_stats_cache stats)
		ca->mi.nbuckets - ca->mi.first_bucket -
		stats.buckets_dirty -
		stats.buckets_alloc -
174 * Number of reclaimable buckets - only for use by the allocator thread:
176 static inline u64 buckets_available_cache(struct cache *ca)
178 return __buckets_available_cache(ca, bch_bucket_stats_read_cache(ca));
181 static inline u64 __buckets_free_cache(struct cache *ca,
182 struct bucket_stats_cache stats)
184 return __buckets_available_cache(ca, stats) +
185 fifo_used(&ca->free[RESERVE_NONE]) +
186 fifo_used(&ca->free_inc);
189 static inline u64 buckets_free_cache(struct cache *ca)
191 return __buckets_free_cache(ca, bch_bucket_stats_read_cache(ca));
194 /* Cache set stats: */
196 struct bucket_stats_cache_set __bch_bucket_stats_read_cache_set(struct cache_set *);
197 struct bucket_stats_cache_set bch_bucket_stats_read_cache_set(struct cache_set *);
198 void bch_cache_set_stats_apply(struct cache_set *,
199 struct bucket_stats_cache_set *,
200 struct disk_reservation *,
/*
 * Sectors in use across the cache set: compressed metadata plus compressed
 * dirty data (plus, presumably, the reserved total computed below — it is
 * not visibly used in this extract).
 *
 * NOTE(review): the tail of the return expression is missing from this
 * extract; confirm how 'reserved' enters the sum against the full source.
 */
static inline u64 __cache_set_sectors_used(struct cache_set *c)
	struct bucket_stats_cache_set stats = __bch_bucket_stats_read_cache_set(c);
	u64 reserved = stats.persistent_reserved +
		stats.online_reserved;

	return stats.s[S_COMPRESSED][S_META] +
	       stats.s[S_COMPRESSED][S_DIRTY] +
215 static inline u64 cache_set_sectors_used(struct cache_set *c)
217 return min(c->capacity, __cache_set_sectors_used(c));
/*
 * Total available sectors across all caches: sums each device's
 * reclaimable buckets converted to sectors (<< bucket_bits).
 *
 * NOTE(review): local declarations (ca, i, ret), the RCU lock/unlock
 * around the _rcu iteration, and the return are missing from this
 * extract; confirm against the full source.
 */
static inline u64 sectors_available(struct cache_set *c)
	for_each_cache_rcu(ca, c, i)
		ret += buckets_available_cache(ca) << ca->bucket_bits;
/*
 * A bucket is available (may be invalidated and reused) when nobody holds
 * it: not owned by the allocator and holding no dirty sectors.
 *
 * NOTE(review): one condition line between the two visible ones is missing
 * from this extract; confirm the full predicate against the source.
 */
static inline bool is_available_bucket(struct bucket_mark mark)
	return (!mark.owned_by_allocator &&
		!mark.dirty_sectors);
242 void bch_bucket_seq_cleanup(struct cache_set *);
244 void bch_invalidate_bucket(struct cache *, struct bucket *);
245 void bch_mark_free_bucket(struct cache *, struct bucket *);
246 void bch_mark_alloc_bucket(struct cache *, struct bucket *, bool);
247 void bch_mark_metadata_bucket(struct cache *, struct bucket *, bool);
249 void __bch_gc_mark_key(struct cache_set *, struct bkey_s_c, s64, bool,
250 struct bucket_stats_cache_set *);
251 void bch_gc_mark_key(struct cache_set *, struct bkey_s_c, s64, bool);
252 void bch_mark_key(struct cache_set *, struct bkey_s_c, s64, bool,
253 struct gc_pos, struct bucket_stats_cache_set *, u64);
255 void bch_recalc_sectors_available(struct cache_set *);
257 void bch_disk_reservation_put(struct cache_set *,
258 struct disk_reservation *);
260 #define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
261 #define BCH_DISK_RESERVATION_METADATA (1 << 1)
262 #define BCH_DISK_RESERVATION_GC_LOCK_HELD (1 << 2)
263 #define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD (1 << 3)
265 int bch_disk_reservation_add(struct cache_set *,
266 struct disk_reservation *,
268 int bch_disk_reservation_get(struct cache_set *,
269 struct disk_reservation *,
272 #endif /* _BUCKETS_H */