1 #ifndef _BCACHE_SUPER_IO_H
2 #define _BCACHE_SUPER_IO_H
6 #include "super_types.h"
8 #include <asm/byteorder.h>
/*
 * Superblock "fields" are variable-length sections inside struct bch_sb,
 * identified by enum bch_sb_field_type.
 *
 * bch2_sb_field_get() looks a field up (presumably returning NULL when
 * absent — the NULL-safe field_to_type() wrapper below suggests so; confirm
 * in super-io.c).  The resize variants grow/shrink a field to @u64s 64-bit
 * words, via either a bcache_superblock or a bch_fs.
 */
struct bch_sb_field *bch2_sb_field_get(struct bch_sb *, enum bch_sb_field_type);
struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *,
					  enum bch_sb_field_type, unsigned);
struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *,
					     enum bch_sb_field_type, unsigned);
/*
 * Convert a generic struct bch_sb_field * to the containing type-specific
 * struct bch_sb_field_<name>.  NULL-safe: container_of_or_null() propagates
 * a NULL field pointer.
 */
#define field_to_type(_f, _name)					\
	container_of_or_null(_f, struct bch_sb_field_##_name, field)
/*
 * Generates the typed accessors for one superblock field type:
 *   bch2_sb_get_<name>()        - look the field up, typed
 *   bch2_sb_resize_<name>()     - resize via a bcache_superblock
 *   bch2_fs_sb_resize_<name>()  - resize via a bch_fs
 *
 * (Brace/continuation lines restored; they were lost in the mangled paste.)
 */
#define BCH_SB_FIELD_TYPE(_name)					\
static inline struct bch_sb_field_##_name *				\
bch2_sb_get_##_name(struct bch_sb *sb)					\
{									\
	return field_to_type(bch2_sb_field_get(sb,			\
				BCH_SB_FIELD_##_name), _name);		\
}									\
									\
static inline struct bch_sb_field_##_name *				\
bch2_sb_resize_##_name(struct bcache_superblock *sb, unsigned u64s)	\
{									\
	return field_to_type(bch2_sb_field_resize(sb,			\
				BCH_SB_FIELD_##_name, u64s), _name);	\
}									\
									\
static inline struct bch_sb_field_##_name *				\
bch2_fs_sb_resize_##_name(struct bch_fs *c, unsigned u64s)		\
{									\
	return field_to_type(bch2_fs_sb_field_resize(c,			\
				BCH_SB_FIELD_##_name, u64s), _name);	\
}
/* Instantiate the typed field accessors for each known field type: */
BCH_SB_FIELD_TYPE(journal);
BCH_SB_FIELD_TYPE(members);
BCH_SB_FIELD_TYPE(crypt);
BCH_SB_FIELD_TYPE(replicas);
46 static inline bool bch2_dev_exists(struct bch_sb *sb,
47 struct bch_sb_field_members *mi,
50 return dev < sb->nr_devices &&
51 !bch2_is_zero(mi->members[dev].uuid.b, sizeof(uuid_le));
54 static inline bool bch2_sb_test_feature(struct bch_sb *sb,
55 enum bch_sb_features f)
60 return le64_to_cpu(sb->features[w]) & (1ULL << b);
63 static inline void bch2_sb_set_feature(struct bch_sb *sb,
64 enum bch_sb_features f)
66 if (!bch2_sb_test_feature(sb, f)) {
70 le64_add_cpu(&sb->features[w], 1ULL << b);
74 static inline __le64 bch2_sb_magic(struct bch_fs *c)
77 memcpy(&ret, &c->sb.uuid, sizeof(ret));
81 static inline __u64 jset_magic(struct bch_fs *c)
83 return __le64_to_cpu(bch2_sb_magic(c) ^ JSET_MAGIC);
86 static inline __u64 pset_magic(struct bch_fs *c)
88 return __le64_to_cpu(bch2_sb_magic(c) ^ PSET_MAGIC);
91 static inline __u64 bset_magic(struct bch_fs *c)
93 return __le64_to_cpu(bch2_sb_magic(c) ^ BSET_MAGIC);
96 static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
98 return (struct bch_member_cpu) {
99 .nbuckets = le64_to_cpu(mi->nbuckets),
100 .first_bucket = le16_to_cpu(mi->first_bucket),
101 .bucket_size = le16_to_cpu(mi->bucket_size),
102 .state = BCH_MEMBER_STATE(mi),
103 .tier = BCH_MEMBER_TIER(mi),
104 .replacement = BCH_MEMBER_REPLACEMENT(mi),
105 .discard = BCH_MEMBER_DISCARD(mi),
106 .valid = !bch2_is_zero(mi->uuid.b, sizeof(uuid_le)),
/* Copy superblock state into / out of the in-memory bch_fs: */
int bch2_sb_to_fs(struct bch_fs *, struct bch_sb *);
int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *);

/* Superblock buffer lifetime management: */
void bch2_free_super(struct bcache_superblock *);
int bch2_super_realloc(struct bcache_superblock *, unsigned);

/*
 * Validation: returns an error string describing the problem, or
 * presumably NULL on success (const char * error-return convention —
 * confirm against super-io.c).
 */
const char *bch2_sb_validate_journal(struct bch_sb *,
				     struct bch_member_cpu);
const char *bch2_sb_validate(struct bcache_superblock *);

/* Read/write the superblock itself: */
const char *bch2_read_super(struct bcache_superblock *,
			    struct bch_opts, const char *);
void bch2_write_super(struct bch_fs *);
124 static inline bool replicas_test_dev(struct bch_replicas_cpu_entry *e,
127 return (e->devs[dev >> 3] & (1 << (dev & 7))) != 0;
130 static inline void replicas_set_dev(struct bch_replicas_cpu_entry *e,
133 e->devs[dev >> 3] |= 1 << (dev & 7);
136 static inline unsigned replicas_dev_slots(struct bch_replicas_cpu *r)
138 return (r->entry_size -
139 offsetof(struct bch_replicas_cpu_entry, devs)) * 8;
142 static inline struct bch_replicas_cpu_entry *
143 cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
145 return (void *) r->entries + r->entry_size * i;
/* Slowpath for bch2_check_mark_super(): adds the entry to the table. */
int bch2_check_mark_super_slowpath(struct bch_fs *, struct bkey_s_c_extent,
				   enum bch_data_types);
151 static inline bool replicas_has_extent(struct bch_replicas_cpu *r,
152 struct bkey_s_c_extent e,
153 enum bch_data_types data_type)
155 const struct bch_extent_ptr *ptr;
156 struct bch_replicas_cpu_entry search = {
157 .data_type = data_type,
159 unsigned max_dev = 0;
162 data_type == BCH_DATA_SB ||
163 data_type >= BCH_DATA_NR);
165 extent_for_each_ptr(e, ptr)
167 max_dev = max_t(unsigned, max_dev, ptr->dev);
168 replicas_set_dev(&search, ptr->dev);
171 return max_dev < replicas_dev_slots(r) &&
172 eytzinger0_find(r->entries, r->nr,
174 memcmp, &search) < r->nr;
177 static inline bool bch2_sb_has_replicas(struct bch_fs *c,
178 struct bkey_s_c_extent e,
179 enum bch_data_types data_type)
184 ret = replicas_has_extent(rcu_dereference(c->replicas),
191 static inline int bch2_check_mark_super(struct bch_fs *c,
192 struct bkey_s_c_extent e,
193 enum bch_data_types data_type)
195 struct bch_replicas_cpu *gc_r;
199 marked = replicas_has_extent(rcu_dereference(c->replicas),
201 (!(gc_r = rcu_dereference(c->replicas_gc)) ||
202 replicas_has_extent(gc_r, e, data_type));
208 return bch2_check_mark_super_slowpath(c, e, data_type);
/*
 * Per-data-type online/offline replica counts.
 * (The inner struct's members were lost in the mangled paste; restored as
 * the nr_online/nr_offline pair consistent with bch2_replicas_online()
 * below — TODO confirm against the .c file.)
 */
struct replicas_status {
	struct {
		unsigned	nr_online;
		unsigned	nr_offline;
	}			replicas[BCH_DATA_NR];
};
218 struct replicas_status __bch2_replicas_status(struct bch_fs *,
220 struct replicas_status bch2_replicas_status(struct bch_fs *);
222 unsigned bch2_replicas_online(struct bch_fs *, bool);
223 unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
225 int bch2_replicas_gc_end(struct bch_fs *, int);
226 int bch2_replicas_gc_start(struct bch_fs *, unsigned);
228 #endif /* _BCACHE_SUPER_IO_H */