/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
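
/*
 * Example: walk every bucket on a device.  Illustrative sketch, not part
 * of the original header; assumes the caller holds one of the locks that
 * make bucket_array() safe to dereference (see __bucket_array() below):
 *
 *	struct bucket_array *buckets = bucket_array(ca);
 *	struct bucket *g;
 *
 *	for_each_bucket(g, buckets)
 *		pr_info("sectors used %u\n", bucket_sectors_used(g->mark));
 */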

/*
 * Atomically update a bucket's mark: initializes @new to the current
 * mark, evaluates @expr to modify it, then loops until the cmpxchg
 * succeeds; evaluates to the old mark.
 */
#define bucket_cmpxchg(g, new, expr)				\
({								\
	struct bucket *_g = g;					\
	u64 _v = atomic64_read(&(_g)->_mark.v);			\
	struct bucket_mark _old;				\
								\
	do {							\
		(new).v.counter = _old.v.counter = _v;		\
		expr;						\
	} while ((_v = atomic64_cmpxchg(&(_g)->_mark.v,		\
			       _old.v.counter,			\
			       (new).v.counter)) != _old.v.counter);\
	_old;							\
})
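
/*
 * Example: atomically account newly cached sectors against a bucket.
 * Illustrative sketch only (the real callers live in buckets.c);
 * "sectors" is a hypothetical local:
 *
 *	struct bucket_mark old, new;
 *
 *	old = bucket_cmpxchg(g, new, ({
 *		new.cached_sectors += sectors;
 *	}));
 */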

static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
						  bool gc)
{
	return rcu_dereference_check(ca->buckets[gc],
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
	return __bucket_array(ca, false);
}

static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
	struct bucket_array *buckets = __bucket_array(ca, gc);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
	return __bucket(ca, b, false);
}

static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
					 size_t b, int rw)
{
	bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}

static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
	return c->bucket_clock[rw].hand - g->io_time[rw];
}
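
/*
 * The IO clock hands advance as IO is done, so bucket_last_io() returns
 * how many clock ticks ago a bucket last saw a read or write.  The u16
 * subtraction wraps; only ages within half the clock period are
 * meaningful.  Sketch of a caller (assumes RCU protects the bucket
 * array on this path):
 *
 *	rcu_read_lock();
 *	age = bucket_last_io(c, bucket(ca, b), READ);
 *	rcu_read_unlock();
 */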

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
	struct bucket *g = bucket(ca, b);

	return g->mark.gen - g->oldest_gen;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
					const struct bch_extent_ptr *ptr,
					bool gc)
{
	return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
						 const struct bch_extent_ptr *ptr)
{
	struct bucket_mark m;

	rcu_read_lock();
	m = READ_ONCE(bucket(ca, PTR_BUCKET_NR(ca, ptr))->mark);
	rcu_read_unlock();

	return m;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}
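
/*
 * Generation numbers are 8 bits and wrap, so ordering is decided by a
 * signed cast of the difference; this is correct as long as the two
 * gens are within 128 of each other.  Worked example:
 *
 *	gen_cmp(1, 254)   == (s8) (1 - 254) == 3	(1 is newer, via wrap)
 *	gen_after(254, 1) == 0				(254 is not newer)
 */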

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}
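
/*
 * A nonzero return is the number of generations the pointer is behind
 * the bucket, i.e. the bucket has since been invalidated and possibly
 * reused.  Typical use is simply to skip such pointers (sketch):
 *
 *	if (ptr_stale(ca, ptr))
 *		continue;
 */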

static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
					  unsigned live_size)
{
	return live_size && p.crc.compression_type
		? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
				       p.crc.uncompressed_size))
		: live_size;
}

static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
{
	return __ptr_disk_sectors(p, p.crc.live_size);
}
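
/*
 * Compressed extents are charged disk space in proportion to how much
 * of the uncompressed data is still live.  Worked example: with
 * compressed_size 8, uncompressed_size 16 and live_size 4,
 *
 *	DIV_ROUND_UP(4 * 8, 16) == 2
 *
 * sectors are charged; uncompressed data is charged live_size directly.
 */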

/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
	return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
	return !mark.owned_by_allocator &&
		!mark.data_type &&
		!bucket_sectors_used(mark);
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
	return (!mark.owned_by_allocator &&
		!mark.dirty_sectors &&
		!mark.stripe);
}

static inline bool bucket_needs_journal_commit(struct bucket_mark m,
					       u16 last_seq_ondisk)
{
	return m.journal_seq_valid &&
		((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}
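
/*
 * journal_seq is stored truncated to 16 bits; the signed 16 bit
 * comparison stays correct across wraparound while the two sequence
 * numbers are within 32768 of each other.  E.g. journal_seq == 3 with
 * last_seq_ondisk == 65530 gives (s16) 3 - (s16) 65530 == 9 > 0: the
 * bucket's mark is newer than what's on disk, so it still needs a
 * journal flush.
 */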

/* Device usage: */

struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

void bch2_dev_usage_from_buckets(struct bch_fs *, struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage stats)
{
	u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

	if (WARN_ONCE(stats.buckets_unavailable > total,
		      "buckets_unavailable overflow (%llu > %llu)\n",
		      stats.buckets_unavailable, total))
		return 0;

	return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
				     struct bch_dev_usage stats)
{
	return __dev_buckets_available(ca, stats) +
		fifo_used(&ca->free[RESERVE_NONE]) +
		fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

/* Filesystem usage: */

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) +
		READ_ONCE(c->replicas.nr);
}
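
/*
 * struct bch_fs_usage ends in a flexible array with one u64 of sector
 * counts per entry in the filesystem's replicas table, so allocations
 * must be sized with fs_usage_u64s().  Sketch only (GFP flags are an
 * assumption here):
 *
 *	struct bch_fs_usage *u =
 *		kzalloc(fs_usage_u64s(c) * sizeof(u64), GFP_NOFS);
 */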

static inline struct bch_fs_usage *bch2_fs_usage_get_scratch(struct bch_fs *c)
{
	struct bch_fs_usage *ret = this_cpu_ptr(c->usage_scratch);

	memset(ret, 0, fs_usage_u64s(c) * sizeof(u64));
	return ret;
}

struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

/* key/bucket marking: */

void bch2_bucket_seq_cleanup(struct bch_fs *);
void bch2_fs_usage_initialize(struct bch_fs *);

void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			       size_t, enum bch_data_type, unsigned,
			       struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_GC			(1 << 0)
#define BCH_BUCKET_MARK_NOATOMIC		(1 << 1)

int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
			 bool, s64, struct gc_pos,
			 struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
		  bool, s64, struct gc_pos,
		  struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
			struct disk_reservation *);

/* disk reservations: */

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors)
		__bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)

int bch2_disk_reservation_add(struct bch_fs *,
			      struct disk_reservation *,
			      unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    unsigned sectors,
					    unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
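
/*
 * Example: reserving space up front for a write, then releasing the
 * reservation.  Illustrative sketch only; "sectors", the replica count
 * and the error handling are assumptions:
 *
 *	struct disk_reservation res;
 *	int ret;
 *
 *	ret = bch2_disk_reservation_get(c, &res, sectors, 1, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	bch2_disk_reservation_put(c, &res);
 */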

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */