/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)                           \
        for (_b = (_buckets)->b + (_buckets)->first_bucket;     \
             _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

#define bucket_cmpxchg(g, new, expr)                            \
({                                                              \
        struct bucket *_g = g;                                  \
        u64 _v = atomic64_read(&(_g)->_mark.v);                 \
        struct bucket_mark _old;                                \
                                                                \
        do {                                                    \
                (new).v.counter = _old.v.counter = _v;          \
                expr;                                           \
        } while ((_v = atomic64_cmpxchg(&(_g)->_mark.v,         \
                               _old.v.counter,                  \
                               (new).v.counter)) != _old.v.counter);\
        _old;                                                   \
})
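
/*
 * Usage sketch (illustrative, not a call site from this file): @new is
 * seeded from the current mark, @expr modifies it, and the loop retries
 * until the cmpxchg succeeds; the statement expression evaluates to the
 * old mark:
 *
 *      struct bucket_mark old, new;
 *
 *      old = bucket_cmpxchg(g, new, ({
 *              new.owned_by_allocator = true;
 *      }));
 */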

static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
                                                  bool gc)
{
        return rcu_dereference_check(ca->buckets[gc],
                                     !ca->fs ||
                                     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
                                     lockdep_is_held(&ca->fs->gc_lock) ||
                                     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
        return __bucket_array(ca, false);
}

static inline struct bucket *__bucket(struct bch_dev *ca, size_t b, bool gc)
{
        struct bucket_array *buckets = __bucket_array(ca, gc);

        BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
        return buckets->b + b;
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
        return __bucket(ca, b, false);
}

static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
                                         size_t b, int rw)
{
        bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}

static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
        return c->bucket_clock[rw].hand - g->io_time[rw];
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */
static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
        struct bucket *g = bucket(ca, b);

        return g->mark.gen - g->oldest_gen;
}
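
/*
 * Worked example (illustrative): gens are 8-bit and wrap, so with
 * g->mark.gen == 4 and g->oldest_gen == 250 the result is
 * (u8) (4 - 250) == 10 - the bucket's gen has advanced 10 times past
 * the oldest pointer still referencing it.
 */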

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
                                   const struct bch_extent_ptr *ptr)
{
        return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
                                        const struct bch_extent_ptr *ptr,
                                        bool gc)
{
        return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
                                                 const struct bch_extent_ptr *ptr)
{
        struct bucket_mark m;

        rcu_read_lock();
        m = READ_ONCE(PTR_BUCKET(ca, ptr, false)->mark);
        rcu_read_unlock();

        return m;
}

static inline int gen_cmp(u8 a, u8 b)
{
        return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
        int r = gen_cmp(a, b);

        return r > 0 ? r : 0;
}
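
/*
 * Worked example (illustrative): the signed cast makes the comparison
 * robust across wraparound as long as the two gens are within 128 of
 * each other: gen_cmp(1, 255) == (s8) (1 - 255) == 2, so gen 1 is
 * "after" gen 255 and gen_after(1, 255) returns 2.
 */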

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns how many gens the bucket has been invalidated since @ptr was
 * created; nonzero means the pointer is stale.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
                           const struct bch_extent_ptr *ptr)
{
        return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}

static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
                                          unsigned live_size)
{
        return live_size && p.crc.compression_type
                ? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
                                       p.crc.uncompressed_size))
                : live_size;
}

static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
{
        return __ptr_disk_sectors(p, p.crc.live_size);
}
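
/*
 * Worked example (illustrative): for a compressed extent with
 * crc.compressed_size == 32, crc.uncompressed_size == 128 and
 * crc.live_size == 64 (half the extent still live), the disk footprint
 * is charged proportionally: DIV_ROUND_UP(64 * 32, 128) == 16 sectors.
 */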

/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
        return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
        return !mark.owned_by_allocator &&
                !mark.data_type &&
                !bucket_sectors_used(mark);
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
        return (!mark.owned_by_allocator &&
                !mark.dirty_sectors &&
                !mark.stripe);
}

static inline bool bucket_needs_journal_commit(struct bucket_mark m,
                                               u16 last_seq_ondisk)
{
        return m.journal_seq_valid &&
                ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}
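
/*
 * Worked example (illustrative): this is the same wraparound-safe
 * comparison as gen_cmp(), at 16 bits - with m.journal_seq == 4 and
 * last_seq_ondisk == 65526, (s16) 4 - (s16) 65526 == 4 - (-10) == 14,
 * which is > 0, so the bucket's last update is not yet on disk in the
 * journal and still needs a commit.
 */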

/* Device usage: */

struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

void bch2_dev_usage_from_buckets(struct bch_fs *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
                                          struct bch_dev_usage stats)
{
        u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

        if (WARN_ONCE(stats.buckets_unavailable > total,
                      "buckets_unavailable overflow (%llu > %llu)\n",
                      stats.buckets_unavailable, total))
                return 0;

        return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
                                     struct bch_dev_usage stats)
{
        return __dev_buckets_available(ca, stats) +
                fifo_used(&ca->free[RESERVE_NONE]) +
                fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

/* Filesystem usage: */

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
        return sizeof(struct bch_fs_usage) / sizeof(u64) +
                READ_ONCE(c->replicas.nr);
}
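
/*
 * Illustrative sketch (assumes struct bch_fs_usage ends in a flexible
 * array with one u64 counter per replicas entry): a heap copy must be
 * sized with fs_usage_u64s(), not sizeof():
 *
 *      struct bch_fs_usage *u =
 *              kzalloc(fs_usage_u64s(c) * sizeof(u64), GFP_KERNEL);
 */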

void bch2_fs_usage_scratch_put(struct bch_fs *, struct bch_fs_usage *);
struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *);

u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
                           struct bch_fs *, struct bch_fs_usage *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

/* key/bucket marking: */

void bch2_bucket_seq_cleanup(struct bch_fs *);
void bch2_fs_usage_initialize(struct bch_fs *);

void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
                               size_t, enum bch_data_type, unsigned,
                               struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_INSERT                  (1 << 0)
#define BCH_BUCKET_MARK_OVERWRITE               (1 << 1)
#define BCH_BUCKET_MARK_BUCKET_INVALIDATE       (1 << 2)
#define BCH_BUCKET_MARK_GC                      (1 << 3)
#define BCH_BUCKET_MARK_ALLOC_READ              (1 << 4)
#define BCH_BUCKET_MARK_NOATOMIC                (1 << 5)

int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, s64,
                         struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64,
                  struct bch_fs_usage *, u64, unsigned);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
                        struct disk_reservation *, unsigned);

int bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
                        struct bkey_s_c, struct bkey_i *,
                        struct bch_fs_usage *, unsigned);
int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
                     struct bch_fs_usage *, unsigned);

void bch2_replicas_delta_list_apply(struct bch_fs *,
                                    struct bch_fs_usage *,
                                    struct replicas_delta_list *);
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, s64, unsigned);
int bch2_trans_mark_update(struct btree_trans *,
                           struct btree_insert_entry *);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);

/* disk reservations: */

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
{
        if (res->sectors)
                __bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL             (1 << 0)

int bch2_disk_reservation_add(struct bch_fs *,
                              struct disk_reservation *,
                              unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
        return (struct disk_reservation) {
                .sectors        = 0,
#if 0
                /* not used yet: */
                .gen            = c->capacity_gen,
#endif
                .nr_replicas    = nr_replicas,
        };
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
                                            struct disk_reservation *res,
                                            unsigned sectors,
                                            unsigned nr_replicas,
                                            int flags)
{
        *res = bch2_disk_reservation_init(c, nr_replicas);

        return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
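
/*
 * Usage sketch (illustrative): reserve space up front, then release
 * whatever the write path did not consume:
 *
 *      struct disk_reservation res;
 *      int ret;
 *
 *      ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *      if (ret)
 *              return ret;
 *
 *      ... do the write ...
 *
 *      bch2_disk_reservation_put(c, &res);
 */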

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */