1 #ifndef _BCACHE_SUPER_H
2 #define _BCACHE_SUPER_H
6 #include "bcachefs_ioctl.h"
8 static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
10 return s >> ca->bucket_bits;
13 static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
15 return ((sector_t) b) << ca->bucket_bits;
18 static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
20 return s & (ca->mi.bucket_size - 1);
/*
 * Return the next non-NULL member device at slot >= *iter, or NULL once
 * all c->sb.nr_devices slots have been scanned.  Empty slots (removed
 * devices) are skipped; *iter is left pointing at the returned slot.
 *
 * Caller must be in an RCU read-side critical section or hold
 * c->state_lock — enforced by rcu_dereference_check() below.  No
 * reference is taken on the returned device.
 */
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca = NULL;

	while (*iter < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[*iter],
					    lockdep_is_held(&c->state_lock))))
		(*iter)++;

	return ca;
}
35 static inline bool bch2_dev_is_online(struct bch_dev *ca)
37 return !percpu_ref_is_zero(&ca->io_ref);
/*
 * Iterate over all member devices without taking any references —
 * caller must satisfy __bch2_next_dev()'s locking requirements
 * (RCU read lock or c->state_lock).
 */
#define __for_each_member_device(ca, c, iter)				\
	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter))); (iter)++)
/* RCU-protected variant — same as the refcount-free iterator above. */
#define for_each_member_device_rcu(ca, c, iter)				\
	__for_each_member_device(ca, c, iter)
/*
 * Like __bch2_next_dev(), but takes a reference (ca->ref) on the
 * returned device; the caller is responsible for dropping it with
 * percpu_ref_put().
 */
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca;

	if ((ca = __bch2_next_dev(c, iter)))
		percpu_ref_get(&ca->ref);

	return ca;
}
/*
 * Iterate over all member devices, holding a ref (ca->ref) on the
 * current device; the ref is dropped automatically at the top of each
 * iteration.
 *
 * If you break early, you must drop your ref on the current device.
 */
#define for_each_member_device(ca, c, iter)				\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_dev(c, &(iter)));			\
	     percpu_ref_put(&ca->ref), (iter)++)
/*
 * Return the next online device whose member state is in @state_mask
 * (a bitmask of 1 << BCH_MEMBER_STATE_*), with a ref taken on its
 * io_ref; caller drops it with percpu_ref_put().  Devices whose io_ref
 * tryget fails (going offline) are skipped.
 */
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       unsigned *iter,
						       int state_mask)
{
	struct bch_dev *ca;

	while ((ca = __bch2_next_dev(c, iter)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		(*iter)++;

	return ca;
}
/*
 * Iterate over online devices matching @state_mask, holding a ref on
 * each device's io_ref; dropped automatically between iterations.
 * Breaking early leaves the caller holding the current device's io_ref.
 */
#define __for_each_online_member(ca, c, iter, state_mask)		\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
	     percpu_ref_put(&ca->io_ref), (iter)++)
/* All online devices, regardless of member state. */
#define for_each_online_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter, ~0)
/* Online devices in read-write state only. */
#define for_each_rw_member(ca, c, iter)					\
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)
/* Online devices that can service reads (RW or RO state). */
#define for_each_readable_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter,				\
		(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
/*
 * Build a bitmask (indexed by dev_idx) of all currently online member
 * devices.  Snapshot only — devices may go offline immediately after.
 */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;
	struct bch_dev *ca;
	unsigned i;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(ca, c, i)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}
/* Lookup helpers: */
struct bch_fs *bch2_bdev_to_fs(struct block_device *);
struct bch_fs *bch2_uuid_to_fs(uuid_le);
int bch2_congested(struct bch_fs *, int);

/* Device member-state transitions: */
bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
			    enum bch_member_state, int);
int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
			 enum bch_member_state, int);
int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
		       enum bch_member_state, int);

/* Device add/remove/online/offline: */
int bch2_dev_fail(struct bch_dev *, int);
int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_add(struct bch_fs *, const char *);
int bch2_dev_online(struct bch_fs *, const char *);
int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_evacuate(struct bch_fs *, struct bch_dev *);

/* Filesystem read-only/read-write transitions: */
bool bch2_fs_emergency_read_only(struct bch_fs *);
void bch2_fs_read_only(struct bch_fs *);
const char *bch2_fs_read_write(struct bch_fs *);

/* Filesystem lifecycle; const char * returns are error strings, NULL on
 * success: */
void bch2_fs_stop(struct bch_fs *);

const char *bch2_fs_start(struct bch_fs *);
const char *bch2_fs_open(char * const *, unsigned, struct bch_opts,
			 struct bch_fs **);
const char *bch2_fs_open_incremental(const char *path);
138 #endif /* _BCACHE_SUPER_H */