#ifndef _BCACHEFS_SUPER_H
#define _BCACHEFS_SUPER_H

#include "bcachefs_ioctl.h"

#include <linux/math64.h>

static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

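/*
 * Illustrative example, not part of the original header: with a hypothetical
 * bucket_size of 1024 sectors, sector 3000 maps to bucket 2
 * (sector_to_bucket), bucket 2 starts at sector 2048 (bucket_to_sector), and
 * bucket_remainder() returns 3000 - 2048 = 952.
 */
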
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return ca->disk_sb.bdev != NULL;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	unsigned i;

	for (i = 0; i < devs.nr; i++)
		if (devs.devs[i] == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	unsigned i;

	for (i = 0; i < devs->nr; i++)
		if (devs->devs[i] == dev) {
			array_remove_item(devs->devs, devs->nr, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	BUG_ON(bch2_dev_list_has_dev(*devs, dev));
	BUG_ON(devs->nr >= BCH_REPLICAS_MAX);
	devs->devs[devs->nr++] = dev;
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
}

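/*
 * Usage sketch, illustrative only (not from the original header); @list is a
 * hypothetical local:
 *
 *	struct bch_devs_list list = bch2_dev_list_single(0);
 *
 *	if (!bch2_dev_list_has_dev(list, 1))
 *		bch2_dev_list_add_dev(&list, 1);
 *	bch2_dev_list_drop_dev(&list, 0);
 */
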
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
					      const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((*iter = mask
		? find_next_bit(mask->d, c->sb.nr_devices, *iter)
		: *iter) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[*iter],
					    lockdep_is_held(&c->state_lock))))
		(*iter)++;

	return ca;
}

#define __for_each_member_device(ca, c, iter, mask)			\
	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)

#define for_each_member_device_rcu(ca, c, iter, mask)			\
	__for_each_member_device(ca, c, iter, mask)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca;

	rcu_read_lock();
	if ((ca = __bch2_next_dev(c, iter, NULL)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)				\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_dev(c, &(iter)));			\
	     percpu_ref_put(&ca->ref), (iter)++)

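/*
 * Usage sketch, illustrative only (not from the original header), showing the
 * early-break rule above; @wanted_idx is a hypothetical variable:
 *
 *	for_each_member_device(ca, c, i) {
 *		if (ca->dev_idx == wanted_idx) {
 *			percpu_ref_put(&ca->ref);
 *			break;
 *		}
 *	}
 */
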
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       unsigned *iter,
						       int state_mask)
{
	struct bch_dev *ca;

	rcu_read_lock();
	while ((ca = __bch2_next_dev(c, iter, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		(*iter)++;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(ca, c, iter, state_mask)		\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
	     percpu_ref_put(&ca->io_ref), (iter)++)

#define for_each_online_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter, ~0)

#define for_each_rw_member(ca, c, iter)					\
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)

#define for_each_readable_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter,				\
		(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))

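/*
 * Usage sketch, illustrative only (not from the original header): counting the
 * online read-write members; @nr_rw is a hypothetical counter:
 *
 *	unsigned nr_rw = 0;
 *
 *	for_each_rw_member(ca, c, i)
 *		nr_rw++;
 */
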
/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_check(c->devs[idx], 1);
}

static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;
	struct bch_dev *ca;
	unsigned i;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(ca, c, i)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

struct bch_fs *bch2_bdev_to_fs(struct block_device *);
struct bch_fs *bch2_uuid_to_fs(uuid_le);
int bch2_congested(void *, int);

bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
			    enum bch_member_state, int);
int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
			 enum bch_member_state, int);
int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
		       enum bch_member_state, int);

int bch2_dev_fail(struct bch_dev *, int);
int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_add(struct bch_fs *, const char *);
int bch2_dev_online(struct bch_fs *, const char *);
int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64);

bool bch2_fs_emergency_read_only(struct bch_fs *);
void bch2_fs_read_only(struct bch_fs *);
const char *bch2_fs_read_write(struct bch_fs *);

void bch2_fs_stop(struct bch_fs *);

const char *bch2_fs_start(struct bch_fs *);
struct bch_fs *bch2_fs_open(char * const *, unsigned, struct bch_opts);
const char *bch2_fs_open_incremental(const char *path);

#endif /* _BCACHEFS_SUPER_H */