#ifndef _BCACHE_SUPER_H
#define _BCACHE_SUPER_H

#include "extents.h"

#include "bcachefs_ioctl.h"

/*
 * Bucket/sector conversion helpers: a bucket spans 1 << ca->bucket_bits
 * sectors, and bucket_remainder() relies on the bucket size being a power
 * of two.
 */
static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
        return s >> ca->bucket_bits;
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
        return ((sector_t) b) << ca->bucket_bits;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
        return s & (ca->mi.bucket_size - 1);
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter)
{
        struct bch_dev *ca = NULL;

        while (*iter < c->sb.nr_devices &&
               !(ca = rcu_dereference_check(c->devs[*iter],
                                            lockdep_is_held(&c->state_lock))))
                (*iter)++;

        return ca;
}

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
        return !percpu_ref_is_zero(&ca->io_ref);
}

#define __for_each_member_device(ca, c, iter)                           \
        for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter))); (iter)++)

#define for_each_member_device_rcu(ca, c, iter)                         \
        __for_each_member_device(ca, c, iter)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
        struct bch_dev *ca;

        rcu_read_lock();
        if ((ca = __bch2_next_dev(c, iter)))
                percpu_ref_get(&ca->ref);
        rcu_read_unlock();

        return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)                             \
        for ((iter) = 0;                                                \
             (ca = bch2_get_next_dev(c, &(iter)));                      \
             percpu_ref_put(&ca->ref), (iter)++)
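
/*
 * Illustrative sketch of the rule above (not part of the original header),
 * assuming c is the struct bch_fs being walked and target_idx is a
 * hypothetical device index: the loop's increment expression is what drops
 * the ref taken by bch2_get_next_dev(), so a caller that breaks out early
 * must put that ref itself:
 *
 *      struct bch_dev *ca;
 *      unsigned i;
 *
 *      for_each_member_device(ca, c, i)
 *              if (ca->dev_idx == target_idx) {
 *                      percpu_ref_put(&ca->ref);
 *                      break;
 *              }
 */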

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
                                                       unsigned *iter,
                                                       int state_mask)
{
        struct bch_dev *ca;

        rcu_read_lock();
        while ((ca = __bch2_next_dev(c, iter)) &&
               (!((1 << ca->mi.state) & state_mask) ||
                !percpu_ref_tryget(&ca->io_ref)))
                (*iter)++;
        rcu_read_unlock();

        return ca;
}

#define __for_each_online_member(ca, c, iter, state_mask)               \
        for ((iter) = 0;                                                \
             (ca = bch2_get_next_online_dev(c, &(iter), state_mask));   \
             percpu_ref_put(&ca->io_ref), (iter)++)

#define for_each_online_member(ca, c, iter)                             \
        __for_each_online_member(ca, c, iter, ~0)

#define for_each_rw_member(ca, c, iter)                                 \
        __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)

#define for_each_readable_member(ca, c, iter)                           \
        __for_each_online_member(ca, c, iter,                           \
                (1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))

static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
        struct bch_devs_mask devs;
        struct bch_dev *ca;
        unsigned i;

        memset(&devs, 0, sizeof(devs));
        for_each_online_member(ca, c, i)
                __set_bit(ca->dev_idx, devs.d);
        return devs;
}

struct bch_fs *bch2_bdev_to_fs(struct block_device *);
struct bch_fs *bch2_uuid_to_fs(uuid_le);
int bch2_congested(struct bch_fs *, int);

bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
                            enum bch_member_state, int);
int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
                         enum bch_member_state, int);
int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
                       enum bch_member_state, int);

int bch2_dev_fail(struct bch_dev *, int);
int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_add(struct bch_fs *, const char *);
int bch2_dev_online(struct bch_fs *, const char *);
int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_evacuate(struct bch_fs *, struct bch_dev *);

bool bch2_fs_emergency_read_only(struct bch_fs *);
void bch2_fs_read_only(struct bch_fs *);
const char *bch2_fs_read_write(struct bch_fs *);

void bch2_fs_stop(struct bch_fs *);

const char *bch2_fs_start(struct bch_fs *);
const char *bch2_fs_open(char * const *, unsigned, struct bch_opts,
                         struct bch_fs **);
const char *bch2_fs_open_incremental(const char *path);

#endif /* _BCACHE_SUPER_H */