1 #ifndef _BCACHE_SUPER_IO_H
2 #define _BCACHE_SUPER_IO_H
5 #include "super_types.h"
7 #include <asm/byteorder.h>
9 struct bch_sb_field *bch_sb_field_get(struct bch_sb *, enum bch_sb_field_types);
11 #define BCH_SB_FIELD_TYPE(_name) \
12 static inline struct bch_sb_field_##_name * \
13 bch_sb_get_##_name(struct bch_sb *sb) \
15 struct bch_sb_field *f = \
16 bch_sb_field_get(sb, BCH_SB_FIELD_##_name); \
18 return container_of_or_null(f, struct bch_sb_field_##_name, field);\
21 BCH_SB_FIELD_TYPE(journal);
22 BCH_SB_FIELD_TYPE(members);
23 BCH_SB_FIELD_TYPE(crypt);
25 static inline bool bch_sb_test_feature(struct bch_sb *sb,
26 enum bch_sb_features f)
31 return le64_to_cpu(sb->features[w]) & (1ULL << b);
34 static inline void bch_sb_set_feature(struct bch_sb *sb,
35 enum bch_sb_features f)
37 if (!bch_sb_test_feature(sb, f)) {
41 le64_add_cpu(&sb->features[w], 1ULL << b);
45 static inline __le64 bch_sb_magic(struct cache_set *c)
48 memcpy(&ret, &c->sb.uuid, sizeof(ret));
52 static inline __u64 jset_magic(struct cache_set *c)
54 return __le64_to_cpu(bch_sb_magic(c) ^ JSET_MAGIC);
57 static inline __u64 pset_magic(struct cache_set *c)
59 return __le64_to_cpu(bch_sb_magic(c) ^ PSET_MAGIC);
62 static inline __u64 bset_magic(struct cache_set *c)
64 return __le64_to_cpu(bch_sb_magic(c) ^ BSET_MAGIC);
67 static inline struct cache_member_cpu cache_mi_to_cpu_mi(struct bch_member *mi)
69 return (struct cache_member_cpu) {
70 .nbuckets = le64_to_cpu(mi->nbuckets),
71 .first_bucket = le16_to_cpu(mi->first_bucket),
72 .bucket_size = le16_to_cpu(mi->bucket_size),
73 .state = BCH_MEMBER_STATE(mi),
74 .tier = BCH_MEMBER_TIER(mi),
75 .has_metadata = BCH_MEMBER_HAS_METADATA(mi),
76 .has_data = BCH_MEMBER_HAS_DATA(mi),
77 .replacement = BCH_MEMBER_REPLACEMENT(mi),
78 .discard = BCH_MEMBER_DISCARD(mi),
79 .valid = !bch_is_zero(mi->uuid.b, sizeof(uuid_le)),
83 int bch_fs_mi_update(struct cache_set *, struct bch_member *, unsigned);
85 int bch_sb_to_cache_set(struct cache_set *, struct bch_sb *);
86 int bch_sb_from_cache_set(struct cache_set *, struct cache *);
88 struct bch_sb_field *bch_fs_sb_field_resize(struct cache_set *,
89 struct bch_sb_field *, unsigned);
90 struct bch_sb_field *bch_dev_sb_field_resize(struct bcache_superblock *,
91 struct bch_sb_field *, unsigned);
93 void bch_free_super(struct bcache_superblock *);
94 int bch_super_realloc(struct bcache_superblock *, unsigned);
96 const char *bch_validate_cache_super(struct bcache_superblock *);
98 const char *bch_read_super(struct bcache_superblock *,
99 struct bch_opts, const char *);
100 void bch_write_super(struct cache_set *);
102 void bch_check_mark_super_slowpath(struct cache_set *,
103 const struct bkey_i *, bool);
/*
 * RCU-protected access to the member info array: _get() takes the RCU read
 * lock and returns the dereferenced pointer; the caller must pair it with
 * _put() (which drops the read lock) and not use the pointer afterwards.
 */
#define cache_member_info_get(_c)					\
	(rcu_read_lock(), rcu_dereference((_c)->members))

#define cache_member_info_put()	rcu_read_unlock()
110 static inline bool bch_check_super_marked(struct cache_set *c,
111 const struct bkey_i *k, bool meta)
113 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
114 const struct bch_extent_ptr *ptr;
115 struct cache_member_cpu *mi = cache_member_info_get(c)->m;
118 extent_for_each_ptr(e, ptr)
121 ? mi[ptr->dev].has_metadata
122 : mi[ptr->dev].has_data)) {
127 cache_member_info_put();
132 static inline void bch_check_mark_super(struct cache_set *c,
133 const struct bkey_i *k, bool meta)
135 if (bch_check_super_marked(c, k, meta))
138 bch_check_mark_super_slowpath(c, k, meta);
141 #endif /* _BCACHE_SUPER_IO_H */