/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)                           \
        for (_b = (_buckets)->b + (_buckets)->first_bucket;     \
             _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
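
/*
 * Illustrative sketch (not part of this interface): walk every valid bucket
 * on a device; the caller must hold one of the locks that bucket_array()
 * checks for:
 *
 *      struct bucket_array *buckets = bucket_array(ca);
 *      struct bucket *g;
 *
 *      for_each_bucket(g, buckets)
 *              if (bucket_unused(g->mark))
 *                      ... candidate for invalidation ...
 */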

#define bucket_cmpxchg(g, new, expr)                            \
({                                                              \
        u64 _v = atomic64_read(&(g)->_mark.v);                  \
        struct bucket_mark _old;                                \
                                                                \
        do {                                                    \
                (new).v.counter = _old.v.counter = _v;          \
                expr;                                           \
        } while ((_v = atomic64_cmpxchg(&(g)->_mark.v,          \
                               _old.v.counter,                  \
                               (new).v.counter)) != _old.v.counter);\
        _old;                                                   \
})
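
/*
 * Illustrative sketch of bucket_cmpxchg() (assumed usage, not copied from a
 * caller): `expr` edits `new`, the loop retries until the compare-and-swap
 * succeeds, and the pre-update mark is returned:
 *
 *      struct bucket_mark old, new;
 *
 *      old = bucket_cmpxchg(g, new, ({
 *              new.owned_by_allocator = 1;
 *      }));
 */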

static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
        return rcu_dereference_check(ca->buckets,
                                     !ca->fs ||
                                     percpu_rwsem_is_held(&ca->fs->usage_lock) ||
                                     lockdep_is_held(&ca->fs->gc_lock) ||
                                     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
        struct bucket_array *buckets = bucket_array(ca);

        BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
        return buckets->b + b;
}

static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
                                         size_t b, int rw)
{
        bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}

static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
        return c->bucket_clock[rw].hand - g->io_time[rw];
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
        return bucket(ca, b)->mark.gen - ca->oldest_gens[b];
}
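
/*
 * Worked example (illustrative): gen arithmetic is modulo 256, so with
 * mark.gen == 5 and oldest_gens[b] == 250, bucket_gc_gen() returns
 * (u8) (5 - 250) == 11: the bucket's gen is 11 ahead of the oldest btree
 * pointer into it, despite the counter having wrapped.
 */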

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
                                   const struct bch_extent_ptr *ptr)
{
        return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
                                        const struct bch_extent_ptr *ptr)
{
        return bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
                                                 const struct bch_extent_ptr *ptr)
{
        struct bucket_mark m;

        rcu_read_lock();
        m = READ_ONCE(bucket(ca, PTR_BUCKET_NR(ca, ptr))->mark);
        rcu_read_unlock();

        return m;
}

static inline int gen_cmp(u8 a, u8 b)
{
        return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
        int r = gen_cmp(a, b);

        return r > 0 ? r : 0;
}
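
/*
 * Worked example (illustrative): the (s8) cast makes the comparison correct
 * across 8-bit wraparound, provided the two gens are within 128 of each
 * other:
 *
 *      gen_cmp(2, 255) == (s8) (2 - 255) ==  3, so gen_after(2, 255) == 3
 *      gen_cmp(255, 2) == (s8) (255 - 2) == -3, so gen_after(255, 2) == 0
 */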

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns the number of generations by which the bucket's gen is ahead of
 * the pointer's gen: 0 if the pointer is still valid, nonzero if the bucket
 * has since been invalidated.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
                           const struct bch_extent_ptr *ptr)
{
        return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}
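
/*
 * Illustrative usage sketch (not copied from a caller): skip pointers whose
 * bucket has been invalidated since the extent was written:
 *
 *      if (ptr_stale(ca, ptr))
 *              continue;
 */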

/* bucket gc marks */

/*
 * The dirty and cached sector counts saturate. If this occurs, reference
 * counting alone will not free the bucket, and a btree GC must be performed.
 */
#define GC_MAX_SECTORS_USED ((1U << 15) - 1)

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
        return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
        return !mark.owned_by_allocator &&
                !mark.data_type &&
                !bucket_sectors_used(mark);
}

/* Device usage: */

struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *);
struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
                                          struct bch_dev_usage stats)
{
        u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

        if (WARN_ONCE(stats.buckets_unavailable > total,
                      "buckets_unavailable overflow (%llu > %llu)\n",
                      stats.buckets_unavailable, total))
                return 0;

        return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
                                     struct bch_dev_usage stats)
{
        return __dev_buckets_available(ca, stats) +
                fifo_used(&ca->free[RESERVE_NONE]) +
                fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

/* Filesystem usage: */

static inline enum bch_data_type s_alloc_to_data_type(enum s_alloc s)
{
        switch (s) {
        case S_META:
                return BCH_DATA_BTREE;
        case S_DIRTY:
                return BCH_DATA_USER;
        default:
                BUG();
        }
}

struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
                         struct disk_reservation *, struct gc_pos);

u64 __bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
u64 bch2_fs_sectors_free(struct bch_fs *, struct bch_fs_usage);

static inline bool is_available_bucket(struct bucket_mark mark)
{
        return (!mark.owned_by_allocator &&
                !mark.dirty_sectors &&
                !mark.nouse);
}

static inline bool bucket_needs_journal_commit(struct bucket_mark m,
                                               u16 last_seq_ondisk)
{
        return m.journal_seq_valid &&
                ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}
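
/*
 * Worked example (illustrative): the (s16) subtraction compares 16-bit
 * sequence numbers modulo 65536, like gen_cmp() does for gens. With
 * m.journal_seq == 3 and last_seq_ondisk == 65533,
 * (s16) 3 - (s16) 65533 == 3 - (-3) == 6 > 0, so the mark still awaits a
 * journal commit even though the counter has wrapped.
 */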

void bch2_bucket_seq_cleanup(struct bch_fs *);

bool bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
                               size_t, enum bch_data_type, unsigned,
                               struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_NOATOMIC                (1 << 0)
#define BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE    (1 << 1)
#define BCH_BUCKET_MARK_GC_WILL_VISIT           (1 << 2)
#define BCH_BUCKET_MARK_GC_LOCK_HELD            (1 << 3)

void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool, struct gc_pos,
                   struct bch_fs_usage *, u64, unsigned);

void bch2_recalc_sectors_available(struct bch_fs *);

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
{
        if (res->sectors)
                __bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL             (1 << 0)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD       (1 << 1)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD   (1 << 2)

int bch2_disk_reservation_add(struct bch_fs *,
                              struct disk_reservation *,
                              unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
        return (struct disk_reservation) {
                .sectors        = 0,
#if 0
                /* not used yet: */
                .gen            = c->capacity_gen,
#endif
                .nr_replicas    = nr_replicas,
        };
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
                                            struct disk_reservation *res,
                                            unsigned sectors,
                                            unsigned nr_replicas,
                                            int flags)
{
        *res = bch2_disk_reservation_init(c, nr_replicas);

        return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
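
/*
 * Illustrative usage sketch (assumed pattern, not copied from a caller):
 * reserve space before a write, then release whatever remains afterwards.
 * Note that bch2_disk_reservation_get() multiplies `sectors` by
 * `nr_replicas`:
 *
 *      struct disk_reservation res;
 *      int ret;
 *
 *      ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *      if (ret)
 *              return ret;
 *
 *      ... perform the write covered by the reservation ...
 *
 *      bch2_disk_reservation_put(c, &res);
 */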

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */