/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_IO_H
#define _BCACHEFS_BTREE_IO_H

#include "bkey_methods.h"
#include "bset.h"
#include "btree_locking.h"
#include "checksum.h"
#include "extents.h"
#include "io_types.h"

struct bch_fs;
struct btree_write;
struct btree;
struct btree_iter;
struct btree_node_read_all;

static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
		atomic_inc(&c->btree_cache.dirty);
}

static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
		atomic_dec(&c->btree_cache.dirty);
}
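
/*
 * The dirty bit and c->btree_cache.dirty are kept in sync by the two
 * helpers above: test_and_set_bit()/test_and_clear_bit() ensure the
 * counter moves exactly once per dirty <-> clean transition, however many
 * times a node is redirtied. Illustrative sequence (not taken from a real
 * caller):
 *
 *	set_btree_node_dirty_acct(c, b);	// bit was clear: counter++
 *	set_btree_node_dirty_acct(c, b);	// bit already set: no-op
 *	clear_btree_node_dirty_acct(c, b);	// bit was set: counter--
 */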

static inline unsigned btree_ptr_sectors_written(struct bkey_i *k)
{
	return k->k.type == KEY_TYPE_btree_ptr_v2
		? le16_to_cpu(bkey_i_to_btree_ptr_v2(k)->v.sectors_written)
		: 0;
}
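
/*
 * Only KEY_TYPE_btree_ptr_v2 records how many sectors of a node have
 * actually been written; for older btree pointer types this helper falls
 * back to 0. The le16_to_cpu() is needed because sectors_written is
 * stored little-endian on disk.
 */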

struct btree_read_bio {
	struct bch_fs		*c;
	struct btree		*b;
	struct btree_node_read_all *ra;
	u64			start_time;
	unsigned		have_ioref:1;
	unsigned		idx:7;
	struct extent_ptr_decoded	pick;
	struct work_struct	work;
	struct bio		bio;
};

struct btree_write_bio {
	struct work_struct	work;
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	void			*data;
	unsigned		data_bytes;
	unsigned		sector_offset;
	struct bch_write_bio	wbio;
};
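
/*
 * In both wrappers above the embedded bio (wbio ends in one) is the last
 * member, presumably so they can be allocated from a bioset with front
 * padding and recovered with container_of() in the completion path.
 * Embedding the padded key in btree_write_bio also spares the write path
 * a separate allocation for remembering where the node was written.
 */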

void bch2_btree_node_io_unlock(struct btree *);
void bch2_btree_node_io_lock(struct btree *);
void __bch2_btree_node_wait_on_read(struct btree *);
void __bch2_btree_node_wait_on_write(struct btree *);
void bch2_btree_node_wait_on_read(struct btree *);
void bch2_btree_node_wait_on_write(struct btree *);
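
/*
 * These serialize and wait on a node's in-flight IO: the io_lock/io_unlock
 * pair excludes new reads and writes, while the wait_on variants only
 * block until the current read or write completes. The __ prefixed
 * versions look like the raw helpers for callers that have already
 * rechecked the node's flags themselves. Hypothetical usage, for
 * illustration only:
 *
 *	bch2_btree_node_io_lock(b);
 *	// no read or write IO can be in flight here
 *	bch2_btree_node_io_unlock(b);
 */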

enum compact_mode {
	COMPACT_LAZY,
	COMPACT_ALL,
};

bool bch2_compact_whiteouts(struct bch_fs *, struct btree *,
			    enum compact_mode);

static inline bool should_compact_bset_lazy(struct btree *b,
					    struct bset_tree *t)
{
	unsigned total_u64s = bset_u64s(t);
	unsigned dead_u64s = bset_dead_u64s(b, t);

	return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
}

static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		if (should_compact_bset_lazy(b, t))
			return bch2_compact_whiteouts(c, b, COMPACT_LAZY);

	return false;
}
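
/*
 * The lazy heuristic fires only when a bset has both more than 64 dead
 * u64s and more than a third of it dead. Worked example: total_u64s == 250
 * with dead_u64s == 100 compacts (100 > 64, 300 > 250); dead_u64s == 60
 * never does, whatever the ratio, since 60 <= 64.
 */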

static inline struct nonce btree_nonce(struct bset *i, unsigned offset)
{
	return (struct nonce) {{
		[0] = cpu_to_le32(offset),
		[1] = ((__le32 *) &i->seq)[0],
		[2] = ((__le32 *) &i->seq)[1],
		[3] = ((__le32 *) &i->journal_seq)[0] ^ BCH_NONCE_BTREE,
	}};
}
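
/*
 * The nonce binds the ciphertext to where and what the bset is: word 0 is
 * the bset's sector offset within the node, words 1-2 are the bset's
 * 64-bit seq, and word 3 mixes the low half of journal_seq with the
 * BCH_NONCE_BTREE type tag, keeping btree nonces in a separate domain
 * from the other metadata nonce types.
 */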

static inline int bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
{
	struct nonce nonce = btree_nonce(i, offset);
	int ret;

	if (!offset) {
		struct btree_node *bn = container_of(i, struct btree_node, keys);
		unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;

		ret = bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
				   &bn->flags, bytes);
		if (ret)
			return ret;

		nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
	}

	return bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
			    vstruct_end(i) - (void *) i->_data);
}
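
/*
 * The first bset (offset == 0) is encrypted in two steps because it lives
 * inside struct btree_node: the node header from ->flags up to (but not
 * including) ->keys is encrypted first, then the nonce is advanced by
 * that length rounded up to a ChaCha block, so the key stream does not
 * repeat when the variable-length key area is encrypted in the second
 * call.
 */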

void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);

void bch2_btree_node_drop_keys_outside_node(struct btree *);

void bch2_btree_build_aux_trees(struct btree *);
void bch2_btree_init_next(struct btree_trans *, struct btree *);

int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
			      struct btree *, bool);
void bch2_btree_node_read(struct bch_fs *, struct btree *, bool);
int bch2_btree_root_read(struct bch_fs *, enum btree_id,
			 const struct bkey_i *, unsigned);

void bch2_btree_complete_write(struct bch_fs *, struct btree *,
			       struct btree_write *);

bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);

#define BTREE_WRITE_ONLY_IF_NEED	(1U << 0)
#define BTREE_WRITE_ALREADY_STARTED	(1U << 1)
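
/*
 * Flags for the node write functions below: BTREE_WRITE_ONLY_IF_NEED
 * skips the write unless the node actually needs one (see
 * btree_node_write_if_need()), and BTREE_WRITE_ALREADY_STARTED is
 * presumably passed when a caller is continuing a write it has already
 * claimed rather than initiating a new one.
 */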

void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
void bch2_btree_node_write(struct bch_fs *, struct btree *,
			   enum six_lock_type, unsigned);

static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
					    enum six_lock_type lock_held)
{
	bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
}
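
/*
 * Illustrative call, assuming the caller holds at least a read lock on
 * the node; the lock type is forwarded so the write path knows what it is
 * allowed to drop or upgrade:
 *
 *	btree_node_write_if_need(c, b, SIX_LOCK_read);
 */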

bool bch2_btree_flush_all_reads(struct bch_fs *);
bool bch2_btree_flush_all_writes(struct bch_fs *);

static inline void compat_bformat(unsigned level, enum btree_id btree_id,
				  unsigned version, unsigned big_endian,
				  int write, struct bkey_format *f)
{
	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id == BTREE_ID_inodes) {
		swap(f->bits_per_field[BKEY_FIELD_INODE],
		     f->bits_per_field[BKEY_FIELD_OFFSET]);
		swap(f->field_offset[BKEY_FIELD_INODE],
		     f->field_offset[BKEY_FIELD_OFFSET]);
	}

	if (version < bcachefs_metadata_version_snapshot &&
	    (level || btree_type_has_snapshots(btree_id))) {
		u64 max_packed =
			~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);

		f->field_offset[BKEY_FIELD_SNAPSHOT] = write
			? 0
			: U32_MAX - max_packed;
	}
}
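
/*
 * The inode-field swap above is self-inverse, so the same code handles
 * reading old-format nodes and writing nodes old versions can read:
 * before bcachefs_metadata_version_inode_btree_change the inodes btree
 * kept the inode and offset fields transposed relative to the current
 * layout. The snapshot fixup is direction-dependent instead, selecting
 * field_offset via the write flag so that keys from pre-snapshot formats
 * read back with the snapshot field filled in at U32_MAX.
 */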

static inline void compat_bpos(unsigned level, enum btree_id btree_id,
			       unsigned version, unsigned big_endian,
			       int write, struct bpos *p)
{
	if (big_endian != CPU_BIG_ENDIAN)
		bch2_bpos_swab(p);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id == BTREE_ID_inodes)
		swap(p->inode, p->offset);
}
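
/*
 * compat_bpos() first fixes byte order (positions are stored in the
 * writer's endianness, so bch2_bpos_swab() runs only on a mismatched
 * host), then applies the same inodes-btree inode/offset transposition as
 * compat_bformat(), here on an unpacked position.
 */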

static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
				     unsigned version, unsigned big_endian,
				     int write,
				     struct btree_node *bn)
{
	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bpos_cmp(bn->min_key, POS_MIN) &&
	    write)
		bn->min_key = bpos_nosnap_predecessor(bn->min_key);

	if (version < bcachefs_metadata_version_snapshot &&
	    write)
		bn->max_key.snapshot = 0;

	compat_bpos(level, btree_id, version, big_endian, write, &bn->min_key);
	compat_bpos(level, btree_id, version, big_endian, write, &bn->max_key);

	if (version < bcachefs_metadata_version_snapshot &&
	    !write)
		bn->max_key.snapshot = U32_MAX;

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bpos_cmp(bn->min_key, POS_MIN) &&
	    !write)
		bn->min_key = bpos_nosnap_successor(bn->min_key);
}
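
/*
 * The ordering in compat_btree_node() is deliberate: min_key is nudged to
 * its predecessor before the bpos fixups when writing an old-format
 * extents node, and to its successor after them when reading one, so the
 * two directions mirror each other exactly. This is consistent with old
 * extents btrees treating min_key as exclusive rather than inclusive;
 * POS_MIN is skipped via the bpos_cmp() check since it has no
 * predecessor.
 */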

#endif /* _BCACHEFS_BTREE_IO_H */