/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)                           \
        for (_b = (_buckets)->b + (_buckets)->first_bucket;     \
             _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

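/*
 * Example: walking every bucket on a device (a minimal sketch, assuming the
 * caller holds one of the locks that __bucket_array() below checks for, so
 * the bucket array cannot be resized or freed underneath it):
 *
 *      struct bucket_array *buckets = bucket_array(ca);
 *      struct bucket *g;
 *
 *      for_each_bucket(g, buckets)
 *              pr_debug("bucket gen %u\n", g->mark.gen);
 */
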
#define bucket_cmpxchg(g, new, expr)                            \
({                                                              \
        u64 _v = atomic64_read(&(g)->_mark.v);                  \
        struct bucket_mark _old;                                \
                                                                \
        do {                                                    \
                (new).v.counter = _old.v.counter = _v;          \
                expr;                                           \
        } while ((_v = atomic64_cmpxchg(&(g)->_mark.v,          \
                               _old.v.counter,                  \
                               (new).v.counter)) != _old.v.counter);\
        _old;                                                   \
})

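/*
 * Example: atomically updating one field of a bucket mark with
 * bucket_cmpxchg() (a sketch mirroring bucket_set_dirty() below). "expr"
 * modifies the copy "new"; the loop retries the compare-and-exchange on
 * contention, and the macro evaluates to the mark as it was before the
 * update:
 *
 *      struct bucket_mark new, old;
 *
 *      old = bucket_cmpxchg(g, new, new.dirty = true);
 */
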
static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
                                                  bool gc)
{
        return rcu_dereference_check(ca->buckets[gc],
                                     !ca->fs ||
                                     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
                                     lockdep_is_held(&ca->fs->gc_lock) ||
                                     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
        return __bucket_array(ca, false);
}

static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
        struct bucket_array *buckets = __bucket_array(ca, gc);

        BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
        return buckets->b + b;
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
        return __bucket(ca, b, false);
}

static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
{
        struct bucket *g;
        struct bucket_mark m;

        rcu_read_lock();
        g = bucket(ca, b);
        bucket_cmpxchg(g, m, m.dirty = true);
        rcu_read_unlock();
}

static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
                                         size_t b, int rw)
{
        bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}

static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
        return c->bucket_clock[rw].hand - g->io_time[rw];
}

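/*
 * The two bucket_clock hands (one for reads, one for writes) advance as IO
 * is performed; bucket_last_io() returns how far the relevant hand has moved
 * since the bucket was last touched, giving a coarse least-recently-used age
 * for deciding which buckets to reclaim.
 */
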
/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
        return bucket(ca, b)->mark.gen - ca->oldest_gens[b];
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
                                   const struct bch_extent_ptr *ptr)
{
        return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
                                        const struct bch_extent_ptr *ptr)
{
        return bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
                                                 const struct bch_extent_ptr *ptr)
{
        struct bucket_mark m;

        rcu_read_lock();
        m = READ_ONCE(bucket(ca, PTR_BUCKET_NR(ca, ptr))->mark);
        rcu_read_unlock();

        return m;
}

static inline int gen_cmp(u8 a, u8 b)
{
        return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
        int r = gen_cmp(a, b);

        return r > 0 ? r : 0;
}

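/*
 * Worked example: gens are 8 bits and wrap, so they are compared in signed
 * 8 bit arithmetic: gen_cmp(1, 250) = (s8) (1 - 250) = 7, so gen 1 correctly
 * compares as newer than gen 250 across the wrap. The comparison is only
 * meaningful while the two gens are within 128 of each other.
 */
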
/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns the number of gens the bucket is ahead of the pointer, or 0 if the
 * pointer is still valid.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
                           const struct bch_extent_ptr *ptr)
{
        return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}

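/*
 * Example: a reader skipping stale cached pointers (a minimal sketch; extent
 * iteration and device lookup are elided). A nonzero ptr_stale() means the
 * bucket has been invalidated, and possibly reused, since the pointer was
 * written, so the data it points at must not be read:
 *
 *      if (ptr_stale(ca, ptr))
 *              continue;
 */
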
static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
                                          unsigned live_size)
{
        return live_size && p.crc.compression_type
                ? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
                                       p.crc.uncompressed_size))
                : live_size;
}

static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
{
        return __ptr_disk_sectors(p, p.crc.live_size);
}

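/*
 * Worked example: for a compressed extent with crc.compressed_size 8,
 * crc.uncompressed_size 16 and live_size 4 (half the extent has been
 * overwritten), __ptr_disk_sectors() charges
 * DIV_ROUND_UP(4 * 8, 16) = 2 sectors; uncompressed data is simply charged
 * its live size. The max(1U, ...) guarantees a live extent is always charged
 * at least one sector.
 */
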
/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
        return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
        return !mark.owned_by_allocator &&
                !mark.data_type &&
                !bucket_sectors_used(mark);
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
        return (!mark.owned_by_allocator &&
                !mark.dirty_sectors &&
                !mark.stripe);
}

/*
 * The mark's journal_seq is a 16 bit truncation of the journal sequence
 * number, so the comparison is done in signed 16 bit arithmetic for the same
 * wraparound reasons as gen_cmp() above:
 */
static inline bool bucket_needs_journal_commit(struct bucket_mark m,
                                               u16 last_seq_ondisk)
{
        return m.journal_seq_valid &&
                ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}

/* Device usage: */

struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *, bool);
struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
                                          struct bch_dev_usage stats)
{
        u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

        if (WARN_ONCE(stats.buckets_unavailable > total,
                      "buckets_unavailable overflow (%llu > %llu)\n",
                      stats.buckets_unavailable, total))
                return 0;

        return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
                                     struct bch_dev_usage stats)
{
        return __dev_buckets_available(ca, stats) +
                fifo_used(&ca->free[RESERVE_NONE]) +
                fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

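/*
 * Note the distinction above: "available" counts only buckets the allocator
 * could still reclaim, while "free" additionally counts buckets already
 * sitting on the RESERVE_NONE freelist and on free_inc.
 */
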
/* Filesystem usage: */

struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *, bool);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

static inline u64 bch2_fs_sectors_free(struct bch_fs *c)
{
        struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);

        return usage.capacity - usage.used;
}

/* key/bucket marking: */

void bch2_bucket_seq_cleanup(struct bch_fs *);

void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
                               size_t, enum bch_data_type, unsigned,
                               struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_NOATOMIC                (1 << 0)
#define BCH_BUCKET_MARK_GC                      (1 << 1)

int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
                         bool, s64, struct gc_pos,
                         struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
                  bool, s64, struct gc_pos,
                  struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
                         struct disk_reservation *, struct gc_pos);

/* disk reservations: */

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
{
        if (res->sectors)
                __bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL             (1 << 0)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD       (1 << 1)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD   (1 << 2)

int bch2_disk_reservation_add(struct bch_fs *,
                              struct disk_reservation *,
                              unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
        return (struct disk_reservation) {
                .sectors        = 0,
#if 0
                /* not used yet: */
                .gen            = c->capacity_gen,
#endif
                .nr_replicas    = nr_replicas,
        };
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
                                            struct disk_reservation *res,
                                            unsigned sectors,
                                            unsigned nr_replicas,
                                            int flags)
{
        *res = bch2_disk_reservation_init(c, nr_replicas);

        return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

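/*
 * Example: reserving space ahead of a write and releasing it once the write
 * has been accounted (a minimal sketch; error handling and the write itself
 * are elided):
 *
 *      struct disk_reservation res;
 *      int ret;
 *
 *      ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *      if (ret)
 *              return ret;
 *
 *      ... perform the write that consumes the reservation ...
 *
 *      bch2_disk_reservation_put(c, &res);
 */
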
int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */