/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SUPER_H
#define _BCACHEFS_SUPER_H

#include "extents.h"

#include "bcachefs_ioctl.h"

#include <linux/math64.h>

static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
        return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
        return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
        u32 remainder;

        div_u64_rem(s, ca->mi.bucket_size, &remainder);
        return remainder;
}

static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
                                                 u32 *offset)
{
        return div_u64_rem(s, ca->mi.bucket_size, offset);
}

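/*
 * Worked example (illustrative only, not part of the interface): with
 * a hypothetical bucket_size of 128 sectors, sector 300 lands in
 * bucket 2 at offset 44:
 *
 *      u32 offset;
 *      size_t b = sector_to_bucket_and_offset(ca, 300, &offset);
 *
 *      // b == 2, offset == 44
 *      // bucket_to_sector(ca, b) + offset == 300
 *      // bucket_remainder(ca, 300) == 44
 */
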
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
        return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
        return bch2_dev_is_online(ca) &&
                ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
        if (!percpu_ref_tryget(&ca->io_ref))
                return false;

        if (ca->mi.state == BCH_MEMBER_STATE_rw ||
            (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
                return true;

        percpu_ref_put(&ca->io_ref);
        return false;
}

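/*
 * Sketch of the intended usage (assumes the caller already has a
 * struct bch_dev *ca): a successful bch2_dev_get_ioref() must be
 * paired with a percpu_ref_put() on io_ref:
 *
 *      if (bch2_dev_get_ioref(ca, READ)) {
 *              // ... issue IO to ca ...
 *              percpu_ref_put(&ca->io_ref);
 *      }
 */
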
static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
        return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
                                         unsigned dev)
{
        unsigned i;

        for (i = 0; i < devs.nr; i++)
                if (devs.devs[i] == dev)
                        return true;

        return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
                                          unsigned dev)
{
        unsigned i;

        for (i = 0; i < devs->nr; i++)
                if (devs->devs[i] == dev) {
                        array_remove_item(devs->devs, devs->nr, i);
                        return;
                }
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
                                         unsigned dev)
{
        if (!bch2_dev_list_has_dev(*devs, dev)) {
                BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
                devs->devs[devs->nr++] = dev;
        }
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
        return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
}

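/*
 * Illustrative only (device indices are hypothetical): building a
 * small list with the helpers above:
 *
 *      struct bch_devs_list devs = bch2_dev_list_single(0);
 *
 *      bch2_dev_list_add_dev(&devs, 2);        // devs.nr == 2
 *      bch2_dev_list_add_dev(&devs, 2);        // duplicate: no-op
 *      bch2_dev_list_drop_dev(&devs, 0);       // devs.nr == 1
 */
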
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
                                              const struct bch_devs_mask *mask)
{
        struct bch_dev *ca = NULL;

        while ((*iter = mask
                ? find_next_bit(mask->d, c->sb.nr_devices, *iter)
                : *iter) < c->sb.nr_devices &&
               !(ca = rcu_dereference_check(c->devs[*iter],
                                            lockdep_is_held(&c->state_lock))))
                (*iter)++;

        return ca;
}

#define for_each_member_device_rcu(ca, c, iter, mask)                   \
        for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)

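/*
 * Minimal sketch (the iteration itself takes no refs, so the caller
 * needs rcu_read_lock() or c->state_lock held across it):
 *
 *      struct bch_dev *ca;
 *      unsigned i;
 *
 *      rcu_read_lock();
 *      for_each_member_device_rcu(ca, c, i, NULL)
 *              pr_info("member device %u\n", ca->dev_idx);
 *      rcu_read_unlock();
 */
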
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
        struct bch_dev *ca;

        rcu_read_lock();
        if ((ca = __bch2_next_dev(c, iter, NULL)))
                percpu_ref_get(&ca->ref);
        rcu_read_unlock();

        return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)                             \
        for ((iter) = 0;                                                \
             (ca = bch2_get_next_dev(c, &(iter)));                      \
             percpu_ref_put(&ca->ref), (iter)++)

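/*
 * Sketch of the break-early rule above: the ref is normally dropped in
 * the loop's increment expression, which a break skips:
 *
 *      for_each_member_device(ca, c, i)
 *              if (ca->dev_idx == target_idx) {        // hypothetical condition
 *                      percpu_ref_put(&ca->ref);
 *                      break;
 *              }
 */
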
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
                                                       unsigned *iter,
                                                       int state_mask)
{
        struct bch_dev *ca;

        rcu_read_lock();
        while ((ca = __bch2_next_dev(c, iter, NULL)) &&
               (!((1 << ca->mi.state) & state_mask) ||
                !percpu_ref_tryget(&ca->io_ref)))
                (*iter)++;
        rcu_read_unlock();

        return ca;
}

#define __for_each_online_member(ca, c, iter, state_mask)               \
        for ((iter) = 0;                                                \
             (ca = bch2_get_next_online_dev(c, &(iter), state_mask));   \
             percpu_ref_put(&ca->io_ref), (iter)++)

#define for_each_online_member(ca, c, iter)                             \
        __for_each_online_member(ca, c, iter, ~0)

#define for_each_rw_member(ca, c, iter)                                 \
        __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)

#define for_each_readable_member(ca, c, iter)                           \
        __for_each_online_member(ca, c, iter,                           \
                (1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))

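/*
 * For example (sketch), counting the devices currently accepting
 * writes; these loops drop io_ref in their increment expression, so
 * the same break-early caveat applies here:
 *
 *      unsigned i, nr_rw = 0;
 *      struct bch_dev *ca;
 *
 *      for_each_rw_member(ca, c, i)
 *              nr_rw++;
 */
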
/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
        EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

        return rcu_dereference_check(c->devs[idx], 1);
}

static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
        EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

        return rcu_dereference_protected(c->devs[idx],
                                         lockdep_is_held(&c->sb_lock) ||
                                         lockdep_is_held(&c->state_lock));
}

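/*
 * Illustrative sketch: looking up the device behind an extent pointer
 * whose key is known to reference it (ptr is assumed to be a
 * struct bch_extent_ptr * taken from that key):
 *
 *      struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
 */
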
/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
        struct bch_devs_mask devs;
        struct bch_dev *ca;
        unsigned i;

        memset(&devs, 0, sizeof(devs));
        for_each_online_member(ca, c, i)
                __set_bit(ca->dev_idx, devs.d);
        return devs;
}

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        u64 b_offset    = bucket_to_sector(ca, b);
        u64 b_end       = bucket_to_sector(ca, b + 1);
        unsigned i;

        if (!b)
                return true;

        for (i = 0; i < layout->nr_superblocks; i++) {
                u64 offset = le64_to_cpu(layout->sb_offset[i]);
                u64 end = offset + (1 << layout->sb_max_size_bits);

                if (!(offset >= b_end || end <= b_offset))
                        return true;
        }

        return false;
}

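/*
 * Worked example (hypothetical layout): with bucket_size == 128 and a
 * superblock at sector 8 with sb_max_size_bits == 7, the superblock
 * spans sectors [8, 136), which intersects buckets [0, 128) and
 * [128, 256) -- so is_superblock_bucket() returns true for buckets 0
 * and 1.
 */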

struct bch_fs *bch2_dev_to_fs(dev_t);
struct bch_fs *bch2_uuid_to_fs(__uuid_t);

bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
                            enum bch_member_state, int);
int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
                         enum bch_member_state, int);
int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
                       enum bch_member_state, int);

int bch2_dev_fail(struct bch_dev *, int);
int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_add(struct bch_fs *, const char *);
int bch2_dev_online(struct bch_fs *, const char *);
int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64);
struct bch_dev *bch2_dev_lookup(struct bch_fs *, const char *);

bool bch2_fs_emergency_read_only(struct bch_fs *);
void bch2_fs_read_only(struct bch_fs *);

int bch2_fs_read_write(struct bch_fs *);
int bch2_fs_read_write_early(struct bch_fs *);

/*
 * Only for use in the recovery/fsck path:
 */
static inline void bch2_fs_lazy_rw(struct bch_fs *c)
{
        if (!test_bit(BCH_FS_RW, &c->flags) &&
            !test_bit(BCH_FS_WAS_RW, &c->flags))
                bch2_fs_read_write_early(c);
}

void __bch2_fs_stop(struct bch_fs *);
void bch2_fs_free(struct bch_fs *);
void bch2_fs_stop(struct bch_fs *);

int bch2_fs_start(struct bch_fs *);
struct bch_fs *bch2_fs_open(char * const *, unsigned, struct bch_opts);

#endif /* _BCACHEFS_SUPER_H */