/*
 * libbcache/super.h
 * (source: git.sesse.net, bcachefs-tools-debian,
 *  blob 79da390e601a1a2bd45c59c158ddb393469b87ce)
 */
1 #ifndef _BCACHE_SUPER_H
2 #define _BCACHE_SUPER_H
3
4 #include "extents.h"
5
6 #include <linux/bcache-ioctl.h>
7
8 static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
9 {
10         return s >> ca->bucket_bits;
11 }
12
13 static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
14 {
15         return ((sector_t) b) << ca->bucket_bits;
16 }
17
18 static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
19 {
20         return s & (ca->mi.bucket_size - 1);
21 }
22
23 static inline struct bch_dev *__bch_next_dev(struct bch_fs *c, unsigned *iter)
24 {
25         struct bch_dev *ca = NULL;
26
27         while (*iter < c->sb.nr_devices &&
28                !(ca = rcu_dereference_check(c->devs[*iter],
29                                             lockdep_is_held(&c->state_lock))))
30                 (*iter)++;
31
32         return ca;
33 }
34
/*
 * Iterate over every non-NULL member device of @c: sets @ca to each device
 * in turn, with @iter as its index in c->devs[].  The caller must satisfy
 * __bch_next_dev()'s locking requirement (RCU read lock or c->state_lock);
 * no reference is taken on the devices.
 */
#define __for_each_member_device(ca, c, iter)                           \
	for ((iter) = 0; ((ca) = __bch_next_dev((c), &(iter))); (iter)++)

/* RCU-flavoured name for the same iteration; identical expansion. */
#define for_each_member_device_rcu(ca, c, iter)                         \
	__for_each_member_device(ca, c, iter)
40
41 static inline struct bch_dev *bch_get_next_dev(struct bch_fs *c, unsigned *iter)
42 {
43         struct bch_dev *ca;
44
45         rcu_read_lock();
46         if ((ca = __bch_next_dev(c, iter)))
47                 percpu_ref_get(&ca->ref);
48         rcu_read_unlock();
49
50         return ca;
51 }
52
/*
 * Iterate over all member devices, holding a percpu ref (ca->ref) on the
 * current device for the duration of each iteration; the ref is dropped in
 * the loop's increment expression.
 *
 * If you break early, you must drop your ref on the current device.
 */
#define for_each_member_device(ca, c, iter)                             \
	for ((iter) = 0;                                                \
	     (ca = bch_get_next_dev(c, &(iter)));                       \
	     percpu_ref_put(&ca->ref), (iter)++)
60
61 static inline struct bch_dev *bch_get_next_online_dev(struct bch_fs *c,
62                                                       unsigned *iter,
63                                                       int state_mask)
64 {
65         struct bch_dev *ca;
66
67         rcu_read_lock();
68         while ((ca = __bch_next_dev(c, iter)) &&
69                (!((1 << ca->mi.state) & state_mask) ||
70                 !percpu_ref_tryget(&ca->io_ref)))
71                 (*iter)++;
72         rcu_read_unlock();
73
74         return ca;
75 }
76
/*
 * Iterate over member devices whose state matches @state_mask, holding the
 * device's io_ref for the duration of each iteration (dropped in the
 * increment expression).  As with for_each_member_device(), breaking out
 * early leaves the caller holding the current device's io_ref.
 */
#define __for_each_online_member(ca, c, iter, state_mask)               \
	for ((iter) = 0;                                                \
	     (ca = bch_get_next_online_dev(c, &(iter), state_mask));    \
	     percpu_ref_put(&ca->io_ref), (iter)++)

/* All online devices, regardless of member state. */
#define for_each_online_member(ca, c, iter)                             \
	__for_each_online_member(ca, c, iter, ~0)

/* Online devices in RW state only. */
#define for_each_rw_member(ca, c, iter)                                 \
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)

/* Online devices that can service reads: RW or RO state. */
#define for_each_readable_member(ca, c, iter)                           \
	__for_each_online_member(ca, c, iter,                           \
		(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
91
/* Filesystem lookup / query helpers (defined in the corresponding .c): */
struct bch_fs *bch_bdev_to_fs(struct block_device *);
struct bch_fs *bch_uuid_to_fs(uuid_le);
int bch_congested(struct bch_fs *, int);

/* kobject release callback for struct bch_dev: */
void bch_dev_release(struct kobject *);

/*
 * Device state transitions.  NOTE(review): the trailing int parameters are
 * presumably flags — confirm against the definitions in super.c.
 */
bool bch_dev_state_allowed(struct bch_fs *, struct bch_dev *,
			   enum bch_member_state, int);
int __bch_dev_set_state(struct bch_fs *, struct bch_dev *,
			enum bch_member_state, int);
int bch_dev_set_state(struct bch_fs *, struct bch_dev *,
		      enum bch_member_state, int);

/* Device add/remove/online/offline operations: */
int bch_dev_fail(struct bch_dev *, int);
int bch_dev_remove(struct bch_fs *, struct bch_dev *, int);
int bch_dev_add(struct bch_fs *, const char *);
int bch_dev_online(struct bch_fs *, const char *);
int bch_dev_offline(struct bch_fs *, struct bch_dev *, int);
int bch_dev_migrate(struct bch_fs *, struct bch_dev *);

void bch_fs_detach(struct bch_fs *);

/* Filesystem-wide read-only / read-write transitions: */
bool bch_fs_emergency_read_only(struct bch_fs *);
void bch_fs_read_only(struct bch_fs *);
const char *bch_fs_read_write(struct bch_fs *);

/*
 * Filesystem lifecycle.  The const char * returns are error strings
 * (NULL on success), matching bch_fs_read_write() above.
 */
void bch_fs_release(struct kobject *);
void bch_fs_stop_async(struct bch_fs *);
void bch_fs_stop(struct bch_fs *);

const char *bch_fs_start(struct bch_fs *);
const char *bch_fs_open(char * const *, unsigned, struct bch_opts,
			struct bch_fs **);
const char *bch_fs_open_incremental(const char *path);

/* Globals defined elsewhere: */
extern struct workqueue_struct *bcache_io_wq;
extern struct crypto_shash *bch_sha256;

/* sysfs kobject types for the fs and its sub-directories: */
extern struct kobj_type bch_fs_ktype;
extern struct kobj_type bch_fs_internal_ktype;
extern struct kobj_type bch_fs_time_stats_ktype;
extern struct kobj_type bch_fs_opts_dir_ktype;
extern struct kobj_type bch_dev_ktype;
135
136 #endif /* _BCACHE_SUPER_H */