/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
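
/*
 * Example usage (an illustrative sketch, not part of this header):
 * walking every valid bucket on a device under the RCU read lock, one
 * of the conditions __bucket_array() below accepts; "nr_unused" is a
 * hypothetical counter:
 *
 *	struct bucket *g;
 *	size_t nr_unused = 0;
 *
 *	rcu_read_lock();
 *	for_each_bucket(g, bucket_array(ca))
 *		if (bucket_unused(READ_ONCE(g->mark)))
 *			nr_unused++;
 *	rcu_read_unlock();
 */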

#define bucket_cmpxchg(g, new, expr)				\
({								\
	u64 _v = atomic64_read(&(g)->_mark.v);			\
	struct bucket_mark _old;				\
								\
	do {							\
		(new).v.counter = _old.v.counter = _v;		\
		expr;						\
	} while ((_v = atomic64_cmpxchg(&(g)->_mark.v,		\
			       _old.v.counter,			\
			       (new).v.counter)) != _old.v.counter);\
	_old;							\
})
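
/*
 * Example usage (sketch): "expr" may modify "new" freely; the loop
 * retries until the cmpxchg succeeds, and the whole expression
 * evaluates to the mark as it was before the update ("sectors" here is
 * a hypothetical delta - bucket_set_dirty() below is a real user):
 *
 *	struct bucket_mark old, new;
 *
 *	old = bucket_cmpxchg(g, new, new.dirty_sectors += sectors);
 */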

static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
						  bool gc)
{
	return rcu_dereference_check(ca->buckets[gc],
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
	return __bucket_array(ca, false);
}

static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
	struct bucket_array *buckets = __bucket_array(ca, gc);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
	return __bucket(ca, b, false);
}

static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
{
	struct bucket *g;
	struct bucket_mark m;

	rcu_read_lock();
	g = bucket(ca, b);
	bucket_cmpxchg(g, m, m.dirty = true);
	rcu_read_unlock();
}

static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
					 size_t b, int rw)
{
	bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}

static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
	return c->bucket_clock[rw].hand - g->io_time[rw];
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
	return bucket(ca, b)->mark.gen - ca->oldest_gens[b];
}
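
/*
 * Example: if the bucket's gen is 5 and the oldest pointer into it in
 * the btree has gen 3, bucket_gc_gen() returns 2. Gens are only 8 bits
 * wide, so this difference has to be kept bounded (by running gc, which
 * recomputes oldest_gens) before the arithmetic could wrap.
 */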

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
					const struct bch_extent_ptr *ptr)
{
	return bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
						 const struct bch_extent_ptr *ptr)
{
	struct bucket_mark m;

	rcu_read_lock();
	m = READ_ONCE(bucket(ca, PTR_BUCKET_NR(ca, ptr))->mark);
	rcu_read_unlock();

	return m;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}
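
/*
 * Worked example of the wraparound arithmetic: gens are 8 bit counters
 * that wrap, and the (s8) cast keeps comparisons correct as long as the
 * two gens are within 128 of each other. gen_cmp(1, 255) is
 * (s8) (1 - 255) = 2, so gen 1 compares as two generations after gen
 * 255 even though it is numerically smaller.
 */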

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns the number of generations the pointer is out of date by, or 0
 * if it is current.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}

static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
					  unsigned live_size)
{
	return live_size && p.crc.compression_type
		? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
				       p.crc.uncompressed_size))
		: live_size;
}

static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
{
	return __ptr_disk_sectors(p, p.crc.live_size);
}
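
/*
 * Worked example: a compressed extent with crc.compressed_size 8,
 * crc.uncompressed_size 16 and live_size 4 (three quarters of it has
 * been overwritten) is charged DIV_ROUND_UP(4 * 8, 16) = 2 sectors on
 * disk: the compressed size is scaled in proportion to how much of the
 * extent is still live, and max(1U, ...) ensures a nonempty live extent
 * is never charged zero sectors. Uncompressed extents are charged
 * live_size directly.
 */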

/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
	return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
	return !mark.owned_by_allocator &&
		!mark.data_type &&
		!bucket_sectors_used(mark);
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
	return (!mark.owned_by_allocator &&
		!mark.dirty_sectors &&
		!mark.stripe);
}

static inline bool bucket_needs_journal_commit(struct bucket_mark m,
					       u16 last_seq_ondisk)
{
	/* the (s16) casts make this wraparound safe, as in gen_cmp() above: */
	return m.journal_seq_valid &&
		((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}

/* Device usage: */

struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

void bch2_dev_usage_from_buckets(struct bch_fs *, struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage stats)
{
	u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

	if (WARN_ONCE(stats.buckets_unavailable > total,
		      "buckets_unavailable overflow (%llu > %llu)\n",
		      stats.buckets_unavailable, total))
		return 0;

	return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
				     struct bch_dev_usage stats)
{
	return __dev_buckets_available(ca, stats) +
		fifo_used(&ca->free[RESERVE_NONE]) +
		fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

/* Filesystem usage: */

static inline struct bch_fs_usage *bch2_fs_usage_get_scratch(struct bch_fs *c)
{
	struct bch_fs_usage *ret;

	ret = this_cpu_ptr(c->usage_scratch);

	/* zero the struct and the c->replicas.nr counters that follow it: */
	memset(ret, 0, sizeof(*ret) + c->replicas.nr * sizeof(u64));

	return ret;
}

struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

static inline u64 bch2_fs_sectors_free(struct bch_fs *c)
{
	struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);

	return usage.capacity - usage.used;
}

/* key/bucket marking: */

void bch2_bucket_seq_cleanup(struct bch_fs *);

void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			       size_t, enum bch_data_type, unsigned,
			       struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_NOATOMIC		(1 << 0)
#define BCH_BUCKET_MARK_GC			(1 << 1)

int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
			 bool, s64, struct gc_pos,
			 struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
		  bool, s64, struct gc_pos,
		  struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
			struct disk_reservation *, struct gc_pos);

/* disk reservations: */

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors)
		__bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD	(1 << 1)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD	(1 << 2)

int bch2_disk_reservation_add(struct bch_fs *,
			      struct disk_reservation *,
			      unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    unsigned sectors,
					    unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
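
/*
 * Example usage (sketch; "sectors", "nr_replicas" and the error
 * handling are illustrative): reserve space up front for a write, then
 * release whatever is left once the write completes or is aborted:
 *
 *	struct disk_reservation res;
 *	int ret;
 *
 *	ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *	if (ret)
 *		return ret;
 *
 *	... do the write, consuming res.sectors ...
 *
 *	bch2_disk_reservation_put(c, &res);
 */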

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */