1 #ifndef _BCACHE_SUPER_H
2 #define _BCACHE_SUPER_H
6 #include "bcachefs_ioctl.h"
8 static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
10 return s >> ca->bucket_bits;
13 static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
15 return ((sector_t) b) << ca->bucket_bits;
18 static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
20 return s & (ca->mi.bucket_size - 1);
23 static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter)
25 struct bch_dev *ca = NULL;
27 while (*iter < c->sb.nr_devices &&
28 !(ca = rcu_dereference_check(c->devs[*iter],
29 lockdep_is_held(&c->state_lock))))
/*
 * Iterate over all member devices; no refcounts taken, so the caller must
 * hold RCU or c->state_lock for the duration (see __bch2_next_dev()).
 */
#define __for_each_member_device(ca, c, iter)				\
	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter))); (iter)++)
/* RCU-protected iteration; alias of __for_each_member_device(). */
#define for_each_member_device_rcu(ca, c, iter)				\
	__for_each_member_device(ca, c, iter)
41 static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
46 if ((ca = __bch2_next_dev(c, iter)))
47 percpu_ref_get(&ca->ref);
/*
 * Iterate over all member devices, holding a ref (ca->ref) on the current
 * device; the ref is dropped automatically at the top of each iteration.
 * If you break early, you must drop your ref on the current device
 * yourself.
 */
#define for_each_member_device(ca, c, iter)				\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_dev(c, &(iter)));			\
	     percpu_ref_put(&ca->ref), (iter)++)
61 static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
68 while ((ca = __bch2_next_dev(c, iter)) &&
69 (!((1 << ca->mi.state) & state_mask) ||
70 !percpu_ref_tryget(&ca->io_ref)))
/*
 * Iterate over online member devices matching @state_mask, holding an
 * io ref (ca->io_ref) on the current device; the ref is dropped at the
 * top of each iteration.  Breaking early leaks the io ref unless the
 * caller drops it.
 */
#define __for_each_online_member(ca, c, iter, state_mask)		\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
	     percpu_ref_put(&ca->io_ref), (iter)++)
/* All online devices, regardless of member state: */
#define for_each_online_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter, ~0)
/* Online devices in the RW state only: */
#define for_each_rw_member(ca, c, iter)					\
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)
/* Online devices that can service reads (RW or RO state): */
#define for_each_readable_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter,				\
		(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
92 struct bch_fs *bch2_bdev_to_fs(struct block_device *);
93 struct bch_fs *bch2_uuid_to_fs(uuid_le);
94 int bch2_congested(struct bch_fs *, int);
96 bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
97 enum bch_member_state, int);
98 int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
99 enum bch_member_state, int);
100 int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
101 enum bch_member_state, int);
103 int bch2_dev_fail(struct bch_dev *, int);
104 int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
105 int bch2_dev_add(struct bch_fs *, const char *);
106 int bch2_dev_online(struct bch_fs *, const char *);
107 int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
108 int bch2_dev_evacuate(struct bch_fs *, struct bch_dev *);
110 bool bch2_fs_emergency_read_only(struct bch_fs *);
111 void bch2_fs_read_only(struct bch_fs *);
112 const char *bch2_fs_read_write(struct bch_fs *);
114 void bch2_fs_stop(struct bch_fs *);
116 const char *bch2_fs_start(struct bch_fs *);
117 const char *bch2_fs_open(char * const *, unsigned, struct bch_opts,
119 const char *bch2_fs_open_incremental(const char *path);
121 #endif /* _BCACHE_SUPER_H */