#ifndef _BCACHEFS_SUPER_H
#define _BCACHEFS_SUPER_H

#include "extents.h"

#include "bcachefs_ioctl.h"

#include <linux/math64.h>

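/*
 * Conversions between sectors and buckets on a member device; bucket_size is
 * in sectors:
 */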
static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
        return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
        return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
        u32 remainder;

        div_u64_rem(s, ca->mi.bucket_size, &remainder);
        return remainder;
}

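/* True if this member device currently has a block device open: */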
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
        return ca->disk_sb.bdev != NULL;
}

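/* Number of devices set in @devs: */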
static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
        return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

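/*
 * Helpers for bch_devs_list, a small fixed-size (BCH_REPLICAS_MAX) list of
 * device indices:
 */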
static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
                                         unsigned dev)
{
        unsigned i;

        for (i = 0; i < devs.nr; i++)
                if (devs.devs[i] == dev)
                        return true;

        return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
                                          unsigned dev)
{
        unsigned i;

        for (i = 0; i < devs->nr; i++)
                if (devs->devs[i] == dev) {
                        array_remove_item(devs->devs, devs->nr, i);
                        return;
                }
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
                                         unsigned dev)
{
        BUG_ON(bch2_dev_list_has_dev(*devs, dev));
        BUG_ON(devs->nr >= BCH_REPLICAS_MAX);
        devs->devs[devs->nr++] = dev;
}

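/*
 * Advance @iter to the next present device, optionally restricted to @mask,
 * returning NULL when there are no more; must be called under rcu_read_lock()
 * or with c->state_lock held:
 */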
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
                                              const struct bch_devs_mask *mask)
{
        struct bch_dev *ca = NULL;

        while ((*iter = mask
                ? find_next_bit(mask->d, c->sb.nr_devices, *iter)
                : *iter) < c->sb.nr_devices &&
               !(ca = rcu_dereference_check(c->devs[*iter],
                                            lockdep_is_held(&c->state_lock))))
                (*iter)++;

        return ca;
}

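/* Iterate over member devices under rcu_read_lock() (or c->state_lock): */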
#define __for_each_member_device(ca, c, iter, mask)                     \
        for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)

#define for_each_member_device_rcu(ca, c, iter, mask)                   \
        __for_each_member_device(ca, c, iter, mask)

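/* Returns the next member device with a ref held on it, or NULL: */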
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
        struct bch_dev *ca;

        rcu_read_lock();
        if ((ca = __bch2_next_dev(c, iter, NULL)))
                percpu_ref_get(&ca->ref);
        rcu_read_unlock();

        return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)                             \
        for ((iter) = 0;                                                \
             (ca = bch2_get_next_dev(c, &(iter)));                      \
             percpu_ref_put(&ca->ref), (iter)++)

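/*
 * Returns the next online device whose member state is in @state_mask, with
 * its io_ref held, or NULL when there are no more:
 */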
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
                                                       unsigned *iter,
                                                       int state_mask)
{
        struct bch_dev *ca;

        rcu_read_lock();
        while ((ca = __bch2_next_dev(c, iter, NULL)) &&
               (!((1 << ca->mi.state) & state_mask) ||
                !percpu_ref_tryget(&ca->io_ref)))
                (*iter)++;
        rcu_read_unlock();

        return ca;
}

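/*
 * Iterate over online member devices, filtered by member state; each
 * iteration holds the device's io_ref, so as with for_each_member_device(),
 * drop the io_ref if you break early:
 */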
#define __for_each_online_member(ca, c, iter, state_mask)               \
        for ((iter) = 0;                                                \
             (ca = bch2_get_next_online_dev(c, &(iter), state_mask));   \
             percpu_ref_put(&ca->io_ref), (iter)++)

#define for_each_online_member(ca, c, iter)                             \
        __for_each_online_member(ca, c, iter, ~0)

#define for_each_rw_member(ca, c, iter)                                 \
        __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)

#define for_each_readable_member(ca, c, iter)                           \
        __for_each_online_member(ca, c, iter,                           \
                (1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))

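/*
 * A minimal usage sketch of the iterators above (the pr_info() message is
 * illustrative, not from this codebase); @c is a struct bch_fs *:
 *
 *	struct bch_dev *ca;
 *	unsigned i;
 *
 *	for_each_rw_member(ca, c, i)
 *		pr_info("device %u is online and RW\n", ca->dev_idx);
 */
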
/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
        EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

        return rcu_dereference_check(c->devs[idx], 1);
}

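/* Like bch_dev_bkey_exists(), for when we hold sb_lock or state_lock: */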
static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
        EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

        return rcu_dereference_protected(c->devs[idx],
                                         lockdep_is_held(&c->sb_lock) ||
                                         lockdep_is_held(&c->state_lock));
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
        struct bch_devs_mask devs;
        struct bch_dev *ca;
        unsigned i;

        memset(&devs, 0, sizeof(devs));
        for_each_online_member(ca, c, i)
                __set_bit(ca->dev_idx, devs.d);
        return devs;
}

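/* Device add/remove/state changes and filesystem start/stop: */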
struct bch_fs *bch2_bdev_to_fs(struct block_device *);
struct bch_fs *bch2_uuid_to_fs(uuid_le);
int bch2_congested(void *, int);

bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
                            enum bch_member_state, int);
int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
                         enum bch_member_state, int);
int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
                       enum bch_member_state, int);

int bch2_dev_fail(struct bch_dev *, int);
int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_add(struct bch_fs *, const char *);
int bch2_dev_online(struct bch_fs *, const char *);
int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64);

bool bch2_fs_emergency_read_only(struct bch_fs *);
void bch2_fs_read_only(struct bch_fs *);
const char *bch2_fs_read_write(struct bch_fs *);

void bch2_fs_stop(struct bch_fs *);

const char *bch2_fs_start(struct bch_fs *);
struct bch_fs *bch2_fs_open(char * const *, unsigned, struct bch_opts);
const char *bch2_fs_open_incremental(const char *path);

#endif /* _BCACHEFS_SUPER_H */