/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)                           \
        for (_b = (_buckets)->b + (_buckets)->first_bucket;     \
             _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
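
/*
 * Illustrative use of for_each_bucket() (a sketch, not from the original
 * source; the loop body here is an assumption). The caller must hold one
 * of the locks checked in __bucket_array() below:
 *
 *      struct bucket_array *buckets = bucket_array(ca);
 *      struct bucket *g;
 *
 *      for_each_bucket(g, buckets)
 *              g->oldest_gen = g->mark.gen;
 */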

#define bucket_cmpxchg(g, new, expr)                            \
({                                                              \
        struct bucket *_g = g;                                  \
        u64 _v = atomic64_read(&(_g)->_mark.v);                 \
        struct bucket_mark _old;                                \
                                                                \
        do {                                                    \
                (new).v.counter = _old.v.counter = _v;          \
                expr;                                           \
        } while ((_v = atomic64_cmpxchg(&(_g)->_mark.v,         \
                               _old.v.counter,                  \
                               (new).v.counter)) != _old.v.counter);\
        _old;                                                   \
})
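
/*
 * Illustrative use of bucket_cmpxchg() (a sketch; the variable names and
 * the update are assumptions, not from the original source). @expr is
 * evaluated with @new holding the current mark, and the loop retries
 * until the modified mark lands atomically:
 *
 *      struct bucket_mark old, new;
 *
 *      old = bucket_cmpxchg(g, new, new.dirty_sectors += sectors);
 */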

static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
                                                  bool gc)
{
        return rcu_dereference_check(ca->buckets[gc],
                                     !ca->fs ||
                                     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
                                     lockdep_is_held(&ca->fs->gc_lock) ||
                                     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
        return __bucket_array(ca, false);
}

static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
        struct bucket_array *buckets = __bucket_array(ca, gc);

        BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
        return buckets->b + b;
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
        return __bucket(ca, b, false);
}

static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
                                         size_t b, int rw)
{
        bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}

static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
        return c->bucket_clock[rw].hand - g->io_time[rw];
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
        struct bucket *g = bucket(ca, b);

        return g->mark.gen - g->oldest_gen;
}
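
/*
 * Worked example (assumed values, for illustration only): gens live in a
 * u8, so the subtraction is modulo 256. With g->mark.gen == 5 and
 * g->oldest_gen == 250, bucket_gc_gen() returns (u8) (5 - 250) == 11:
 * the bucket is 11 generations ahead of its oldest btree pointer, even
 * though the counter has wrapped.
 */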

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
                                   const struct bch_extent_ptr *ptr)
{
        return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
                                        const struct bch_extent_ptr *ptr,
                                        bool gc)
{
        return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
                                                 const struct bch_extent_ptr *ptr)
{
        struct bucket_mark m;

        rcu_read_lock();
        m = READ_ONCE(PTR_BUCKET(ca, ptr, false)->mark);
        rcu_read_unlock();

        return m;
}

static inline int gen_cmp(u8 a, u8 b)
{
        return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
        int r = gen_cmp(a, b);

        return r > 0 ? r : 0;
}

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns the number of generations the bucket's gen is ahead of the
 * pointer's gen, i.e. 0 if the pointer is not stale.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
                           const struct bch_extent_ptr *ptr)
{
        return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}
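
/*
 * Worked example (assumed values): gen comparisons are done in s8 so they
 * stay correct across u8 wraparound. With a bucket gen of 2 and a pointer
 * gen of 254, gen_cmp(2, 254) == (s8) (2 - 254) == 4, so ptr_stale()
 * reports the pointer as 4 generations stale even though 2 < 254
 * numerically.
 */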

static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
                                          unsigned live_size)
{
        return live_size && p.crc.compression_type
                ? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
                                       p.crc.uncompressed_size))
                : live_size;
}

static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
{
        return __ptr_disk_sectors(p, p.crc.live_size);
}
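
/*
 * Worked example (assumed values): for a compressed extent the live size
 * is scaled by the compression ratio. With live_size == 8,
 * compressed_size == 4 and uncompressed_size == 16, __ptr_disk_sectors()
 * returns DIV_ROUND_UP(8 * 4, 16) == 2: those 8 live sectors occupy 2
 * sectors on disk. Uncompressed extents return live_size unchanged.
 */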

/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
        return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
        return !mark.owned_by_allocator &&
                !mark.data_type &&
                !bucket_sectors_used(mark);
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
        return (!mark.owned_by_allocator &&
                !mark.dirty_sectors &&
                !mark.stripe);
}

static inline bool bucket_needs_journal_commit(struct bucket_mark m,
                                               u16 last_seq_ondisk)
{
        return m.journal_seq_valid &&
                ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}
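
/*
 * Worked example (assumed values): journal sequence numbers are compared
 * in s16 so the test survives u16 wraparound. With m.journal_seq == 10
 * and last_seq_ondisk == 65530, (s16) 10 - (s16) 65530 == 10 - (-6) ==
 * 16 > 0, so the bucket's mark is newer than what is on disk and still
 * needs a journal commit.
 */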

/* Device usage: */

struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

void bch2_dev_usage_from_buckets(struct bch_fs *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
                                          struct bch_dev_usage stats)
{
        u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

        if (WARN_ONCE(stats.buckets_unavailable > total,
                      "buckets_unavailable overflow (%llu > %llu)\n",
                      stats.buckets_unavailable, total))
                return 0;

        return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
                                     struct bch_dev_usage stats)
{
        return __dev_buckets_available(ca, stats) +
                fifo_used(&ca->free[RESERVE_NONE]) +
                fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

/* Filesystem usage: */

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
        return sizeof(struct bch_fs_usage) / sizeof(u64) +
                READ_ONCE(c->replicas.nr);
}
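
/*
 * Note (editorial reading of the code above, not from the original
 * source): fs_usage_u64s() sizes struct bch_fs_usage in u64 units as the
 * fixed fields plus one trailing counter per entry in the filesystem's
 * replicas table, which is why allocations of this struct go through
 * this helper rather than a bare sizeof().
 */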

void bch2_fs_usage_scratch_put(struct bch_fs *, struct bch_fs_usage *);
struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *);

u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
                           struct bch_fs *, struct bch_fs_usage *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

/* key/bucket marking: */

void bch2_bucket_seq_cleanup(struct bch_fs *);
void bch2_fs_usage_initialize(struct bch_fs *);

void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
                               size_t, enum bch_data_type, unsigned,
                               struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_INSERT                  (1 << 0)
#define BCH_BUCKET_MARK_OVERWRITE               (1 << 1)
#define BCH_BUCKET_MARK_BUCKET_INVALIDATE       (1 << 2)
#define BCH_BUCKET_MARK_GC                      (1 << 3)
#define BCH_BUCKET_MARK_ALLOC_READ              (1 << 4)
#define BCH_BUCKET_MARK_NOATOMIC                (1 << 5)

int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, s64,
                         struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64,
                  struct bch_fs_usage *, u64, unsigned);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
                        struct disk_reservation *, unsigned);

int bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
                        struct bkey_s_c, struct bkey_i *,
                        struct bch_fs_usage *, unsigned);
int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
                     struct bch_fs_usage *, unsigned);

void bch2_replicas_delta_list_apply(struct bch_fs *,
                                    struct bch_fs_usage *,
                                    struct replicas_delta_list *);
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, s64, unsigned);
int bch2_trans_mark_update(struct btree_trans *,
                           struct btree_iter *iter,
                           struct bkey_i *insert);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);

/* disk reservations: */

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
{
        if (res->sectors)
                __bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL             (1 << 0)

int bch2_disk_reservation_add(struct bch_fs *,
                              struct disk_reservation *,
                              unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
        return (struct disk_reservation) {
                .sectors        = 0,
#if 0
                /* not used yet: */
                .gen            = c->capacity_gen,
#endif
                .nr_replicas    = nr_replicas,
        };
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
                                            struct disk_reservation *res,
                                            unsigned sectors,
                                            unsigned nr_replicas,
                                            int flags)
{
        *res = bch2_disk_reservation_init(c, nr_replicas);

        return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
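
/*
 * Illustrative reservation lifecycle (a sketch; the error handling and
 * surrounding context are assumptions, not from the original source):
 *
 *      struct disk_reservation res;
 *      int ret;
 *
 *      ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *      if (ret)
 *              return ret;
 *
 *      ... perform the write the reservation covers ...
 *
 *      bch2_disk_reservation_put(c, &res);
 */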

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */