/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "super.h"

/* Iterate _b over the usable buckets in *_buckets, from first_bucket to nbuckets: */
#define for_each_bucket(_b, _buckets)                           \
        for (_b = (_buckets)->b + (_buckets)->first_bucket;     \
             _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
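
/*
 * Minimal usage sketch for for_each_bucket() (hypothetical helper, not part
 * of this header; uses bucket_array() and bucket_unused(), defined below):
 */
#if 0
static size_t count_unused_buckets(struct bch_dev *ca)
{
        struct bucket_array *buckets;
        struct bucket *g;
        size_t nr = 0;

        rcu_read_lock();
        buckets = bucket_array(ca);
        for_each_bucket(g, buckets)
                nr += bucket_unused(READ_ONCE(g->mark));
        rcu_read_unlock();

        return nr;
}
#endif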

/*
 * Atomically update a bucket's mark: @new is initialized to the current mark,
 * @expr modifies it, and the store is retried until the cmpxchg succeeds.
 * Evaluates to the old mark.
 */
#define bucket_cmpxchg(g, new, expr)                            \
({                                                              \
        u64 _v = atomic64_read(&(g)->_mark.v);                  \
        struct bucket_mark _old;                                \
                                                                \
        do {                                                    \
                (new).v.counter = _old.v.counter = _v;          \
                expr;                                           \
        } while ((_v = atomic64_cmpxchg(&(g)->_mark.v,          \
                               _old.v.counter,                  \
                               (new).v.counter)) != _old.v.counter);\
        _old;                                                   \
})
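
/*
 * Minimal usage sketch for bucket_cmpxchg() (hypothetical helper, not part
 * of this header): atomically drop a bucket's cached data, returning the
 * mark as it was before the update.
 */
#if 0
static inline struct bucket_mark drop_cached_sectors(struct bucket *g)
{
        struct bucket_mark new;

        return bucket_cmpxchg(g, new, new.cached_sectors = 0);
}
#endif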

/* Caller must hold the RCU read lock, or one of the locks checked for below: */
static inline struct bucket_array *bucket_array(struct bch_dev *ca)
{
        return rcu_dereference_check(ca->buckets,
                                     !ca->fs ||
                                     percpu_rwsem_is_held(&ca->fs->usage_lock) ||
                                     lockdep_is_held(&ca->fs->gc_lock) ||
                                     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
        struct bucket_array *buckets = bucket_array(ca);

        BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
        return buckets->b + b;
}

static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
                                         size_t b, int rw)
{
        bucket(ca, b)->io_time[rw] = c->bucket_clock[rw].hand;
}

/* Age of a bucket, in IO clock ticks, since it was last read or written: */
static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
{
        return c->bucket_clock[rw].hand - g->io_time[rw];
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree.
 */

static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
        return bucket(ca, b)->mark.gen - ca->oldest_gens[b];
}
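
/*
 * Worked example (illustrative numbers only): gens live in a u8 and are
 * expected to wrap.  If a bucket's current gen is 5 and the oldest pointer
 * into it has gen 250, then
 *
 *      bucket_gc_gen() = (u8) (5 - 250) = 11
 *
 * i.e. the bucket has been invalidated 11 times since that pointer was
 * created.  GC has to update oldest_gens before this distance can wrap.
 */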

/* Bucket that an extent pointer points into: */
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
                                   const struct bch_extent_ptr *ptr)
{
        return sector_to_bucket(ca, ptr->offset);
}

static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
                                        const struct bch_extent_ptr *ptr)
{
        return bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

/* Snapshot of the mark of the bucket an extent pointer points into: */
static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
                                                 const struct bch_extent_ptr *ptr)
{
        struct bucket_mark m;

        rcu_read_lock();
        m = READ_ONCE(bucket(ca, PTR_BUCKET_NR(ca, ptr))->mark);
        rcu_read_unlock();

        return m;
}

/* Compare generation numbers, handling u8 wraparound: */
static inline int gen_cmp(u8 a, u8 b)
{
        return (s8) (a - b);
}

/* Number of gens @a is ahead of @b, or 0 if @a is not after @b: */
static inline int gen_after(u8 a, u8 b)
{
        int r = gen_cmp(a, b);

        return r > 0 ? r : 0;
}
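
/*
 * Worked example (illustrative numbers only): the cast to s8 makes the
 * comparison robust against u8 wraparound.  With a = 2 and b = 250,
 *
 *      gen_cmp(2, 250) = (s8) (2 - 250) = 8 > 0
 *
 * so gen 2 is correctly treated as 8 generations after gen 250; this holds
 * as long as the two gens are within 128 of each other.
 */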

/**
 * ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns the number of generations the pointer is behind the bucket's
 * current gen, or 0 if the pointer is still valid.
 */
static inline u8 ptr_stale(struct bch_dev *ca,
                           const struct bch_extent_ptr *ptr)
{
        return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}
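
/*
 * Minimal usage sketch for ptr_stale() (hypothetical caller, not part of
 * this header): readers walking an extent's pointers should skip any
 * pointer whose bucket has since been invalidated, as the data it points
 * to may already have been overwritten:
 *
 *      if (ptr_stale(ca, ptr))
 *              continue;
 */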

/* bucket gc marks */

static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
        return mark.dirty_sectors + mark.cached_sectors;
}

static inline bool bucket_unused(struct bucket_mark mark)
{
        return !mark.owned_by_allocator &&
                !mark.data_type &&
                !bucket_sectors_used(mark);
}

/* Device usage: */

struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *);
struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);

static inline u64 __dev_buckets_available(struct bch_dev *ca,
                                          struct bch_dev_usage stats)
{
        u64 total = ca->mi.nbuckets - ca->mi.first_bucket;

        if (WARN_ONCE(stats.buckets_unavailable > total,
                      "buckets_unavailable overflow (%llu > %llu)\n",
                      stats.buckets_unavailable, total))
                return 0;

        return total - stats.buckets_unavailable;
}

/*
 * Number of reclaimable buckets - only for use by the allocator thread:
 */
static inline u64 dev_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_available(ca, bch2_dev_usage_read(c, ca));
}

static inline u64 __dev_buckets_free(struct bch_dev *ca,
                                     struct bch_dev_usage stats)
{
        return __dev_buckets_available(ca, stats) +
                fifo_used(&ca->free[RESERVE_NONE]) +
                fifo_used(&ca->free_inc);
}

static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
{
        return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}

/* Filesystem usage: */

struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
                         struct disk_reservation *, struct gc_pos);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);

static inline bool is_available_bucket(struct bucket_mark mark)
{
        return (!mark.owned_by_allocator &&
                !mark.dirty_sectors &&
                !mark.nouse);
}

/*
 * Returns true if the bucket's most recent update is in a journal entry
 * newer than the last one written to disk; sequence numbers are compared
 * mod 2^16, so the check is robust against wraparound:
 */
static inline bool bucket_needs_journal_commit(struct bucket_mark m,
                                               u16 last_seq_ondisk)
{
        return m.journal_seq_valid &&
                ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}
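
/*
 * Worked example (illustrative numbers only): suppose the journal sequence
 * has wrapped u16, so last_seq_ondisk = 65530 while the bucket was last
 * touched at journal seq 3.  Then
 *
 *      (s16) 3 - (s16) 65530 = 3 - (-6) = 9 > 0
 *
 * so the bucket still needs a journal commit, even though 3 < 65530 as
 * plain integers.
 */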

void bch2_bucket_seq_cleanup(struct bch_fs *);

void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, struct bucket_mark *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
                            size_t, bool, struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
                               size_t, enum bch_data_type, unsigned,
                               struct gc_pos, unsigned);

#define BCH_BUCKET_MARK_NOATOMIC                (1 << 0)
#define BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE    (1 << 1)
#define BCH_BUCKET_MARK_GC_WILL_VISIT           (1 << 2)
#define BCH_BUCKET_MARK_GC_LOCK_HELD            (1 << 3)

void bch2_mark_key(struct bch_fs *, enum bkey_type, struct bkey_s_c,
                   bool, s64, struct gc_pos,
                   struct bch_fs_usage *, u64, unsigned);

void bch2_recalc_sectors_available(struct bch_fs *);

void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);

static inline void bch2_disk_reservation_put(struct bch_fs *c,
                                             struct disk_reservation *res)
{
        if (res->sectors)
                __bch2_disk_reservation_put(c, res);
}

#define BCH_DISK_RESERVATION_NOFAIL             (1 << 0)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD       (1 << 1)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD   (1 << 2)

int bch2_disk_reservation_add(struct bch_fs *,
                              struct disk_reservation *,
                              unsigned, int);

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
        return (struct disk_reservation) {
                .sectors        = 0,
#if 0
                /* not used yet: */
                .gen            = c->capacity_gen,
#endif
                .nr_replicas    = nr_replicas,
        };
}

/* Note: reserves @sectors for each of @nr_replicas replicas: */
static inline int bch2_disk_reservation_get(struct bch_fs *c,
                                            struct disk_reservation *res,
                                            unsigned sectors,
                                            unsigned nr_replicas,
                                            int flags)
{
        *res = bch2_disk_reservation_init(c, nr_replicas);

        return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
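
/*
 * Minimal usage sketch for the reservation API (hypothetical caller, not
 * part of this header; nr_replicas = 2 and flags = 0 chosen arbitrarily):
 * take a reservation before a write, release it once the write has been
 * accounted for or has failed.
 */
#if 0
static int reserve_for_write(struct bch_fs *c, unsigned sectors)
{
        struct disk_reservation res;
        int ret;

        ret = bch2_disk_reservation_get(c, &res, sectors, 2, 0);
        if (ret)
                return ret;     /* no space: nothing was reserved */

        /* ... perform the write, charging it against res ... */

        bch2_disk_reservation_put(c, &res);
        return 0;
}
#endif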

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */