libbcachefs/buckets.h
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

#define bucket_cmpxchg(g, new, expr)				\
({								\
	u64 _v = READ_ONCE((g)->_mark.counter);			\
	struct bucket_mark _old;				\
								\
	do {							\
		(new).counter = _old.counter = _v;		\
		expr;						\
	} while ((_v = cmpxchg(&(g)->_mark.counter,		\
			       _old.counter,			\
			       (new).counter)) != _old.counter);\
	_old;							\
})

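/*
 * Illustrative sketch (not part of the upstream header): a typical
 * bucket_cmpxchg() caller builds the new mark inside the expression argument
 * and gets the mark that was replaced back. The helper name below is
 * hypothetical.
 */
static inline struct bucket_mark
example_mark_owned_by_allocator(struct bucket *g)
{
	struct bucket_mark new;

	/* Retry until the compare-and-exchange on g->_mark succeeds: */
	return bucket_cmpxchg(g, new, new.owned_by_allocator = 1);
}
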
static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->buckets,
				     lockdep_is_held(&ca->fs->usage_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
	struct bucket_array *buckets = bucket_array(ca);

	BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
	return buckets->b + b;
}

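/*
 * Illustrative sketch (not part of the upstream header): walking every
 * bucket on a device with for_each_bucket(). bucket_array() requires one of
 * the locks it checks (e.g. ca->bucket_lock) to be held; the helper name
 * below is hypothetical.
 */
static inline size_t example_count_dirty_buckets(struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket *g;
	size_t nr = 0;

	for_each_bucket(g, buckets)
		if (g->mark.dirty_sectors)
			nr++;

	return nr;
}
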
static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
					 size_t b, int rw)
{
	bucket(ca, b)->prio[rw] = c->prio_clock[rw].hand;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */
static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
	return bucket(ca, b)->mark.gen - ca->oldest_gens[b];
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
					const struct bch_extent_ptr *ptr)
{
	return bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
						 const struct bch_extent_ptr *ptr)
{
	struct bucket_mark m;

	rcu_read_lock();
	m = READ_ONCE(bucket(ca, PTR_BUCKET_NR(ca, ptr))->mark);
	rcu_read_unlock();

	return m;
}

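/*
 * Generation numbers are stored in 8 bits and wrap around; the comparisons
 * below therefore use wrapping (signed 8 bit) arithmetic. For example,
 * gen_cmp(2, 250) == (s8) (2 - 250) == 8, so gen 2 is treated as newer than
 * gen 250, on the assumption that two live gens never differ by 128 or more.
 */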
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns how many generations the bucket's gen has advanced past the
 * pointer's gen, i.e. 0 if the pointer is still valid.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
			   const struct bch_extent_ptr *ptr)
{
	return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}

/* bucket gc marks */

/*
 * The dirty and cached sector counts saturate. If this occurs, reference
 * counting alone will not free the bucket, and a btree GC must be performed.
 */
#define GC_MAX_SECTORS_USED ((1U << 15) - 1)

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
	return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
	return !mark.owned_by_allocator &&
		!mark.data_type &&
		!bucket_sectors_used(mark);
}

/* Device usage: */

struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *);
struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage stats)
{
	u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

	if (WARN_ONCE(stats.buckets_unavailable > total,
		      "buckets_unavailable overflow\n"))
		return 0;

	return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
				     struct bch_dev_usage stats)
{
	return __dev_buckets_available(ca, stats) +
		fifo_used(&ca->free[RESERVE_NONE]) +
		fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
	return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

/* Filesystem usage: */

static inline enum bch_data_type s_alloc_to_data_type(enum s_alloc s)
{
	switch (s) {
	case S_META:
		return BCH_DATA_BTREE;
	case S_DIRTY:
		return BCH_DATA_USER;
	default:
		BUG();
	}
}

struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
			 struct disk_reservation *, struct gc_pos);

u64 __bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
u64 bch2_fs_sectors_free(struct bch_fs *, struct bch_fs_usage);

static inline bool is_available_bucket(struct bucket_mark mark)
{
	return (!mark.owned_by_allocator &&
		!mark.dirty_sectors &&
		!mark.nouse);
}

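/*
 * The journal sequence number carried in the bucket mark is compared against
 * the last on-disk sequence number with wrapping (signed 16 bit) arithmetic,
 * like gen_cmp() above: the bucket needs a journal commit if its mark was
 * written by a journal entry newer than the last one on disk.
 */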
static inline bool bucket_needs_journal_commit(struct bucket_mark m,
					       u16 last_seq_ondisk)
{
	return m.journal_seq_valid &&
		((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}

void bch2_bucket_seq_cleanup(struct bch_fs *);

bool bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
			    size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
			       size_t, enum bch_data_type, unsigned,
			       struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_NOATOMIC		(1 << 0)
#define BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE	(1 << 1)
#define BCH_BUCKET_MARK_GC_WILL_VISIT		(1 << 2)
#define BCH_BUCKET_MARK_GC_LOCK_HELD		(1 << 3)

void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool, struct gc_pos,
		   struct bch_fs_usage *, u64, unsigned);

void bch2_recalc_sectors_available(struct bch_fs *);

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors)
		__bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD	(1 << 1)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD	(1 << 2)

int bch2_disk_reservation_add(struct bch_fs *,
			      struct disk_reservation *,
			      unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    unsigned sectors,
					    unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

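/*
 * Illustrative sketch (not part of the upstream header): the usual
 * reservation lifecycle. Space for @sectors sectors at @nr_replicas replicas
 * is reserved up front, consumed by the write, then released; the helper
 * name below is hypothetical and error handling is minimal.
 */
static inline int example_with_reservation(struct bch_fs *c,
					   unsigned sectors,
					   unsigned nr_replicas)
{
	struct disk_reservation res;
	int ret;

	ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
	if (ret)
		return ret;

	/* ... do the write covered by the reservation ... */

	bch2_disk_reservation_put(c, &res);
	return 0;
}
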
int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */