X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_io.h;h=e251cb6b965ff0a8bdc4aa0684dfdcaa315c32d6;hb=f154e6ed41e6080d7afe76d5d1209c5cae0acc12;hp=0f20224e2a77cec3070850226ea52cd45ebb3695;hpb=2b8c1bb0910534e8687ea3e5abf6d8bbba758247;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_io.h b/libbcachefs/btree_io.h
index 0f20224..e251cb6 100644
--- a/libbcachefs/btree_io.h
+++ b/libbcachefs/btree_io.h
@@ -7,7 +7,7 @@
 #include "btree_locking.h"
 #include "checksum.h"
 #include "extents.h"
-#include "io_types.h"
+#include "io_write_types.h"
 
 struct bch_fs;
 struct btree_write;
@@ -15,18 +15,13 @@ struct btree;
 struct btree_iter;
 struct btree_node_read_all;
 
-static inline bool btree_node_dirty(struct btree *b)
-{
-	return test_bit(BTREE_NODE_dirty, &b->flags);
-}
-
-static inline void set_btree_node_dirty(struct bch_fs *c, struct btree *b)
+static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
 	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
 		atomic_inc(&c->btree_cache.dirty);
 }
 
-static inline void clear_btree_node_dirty(struct bch_fs *c, struct btree *b)
+static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
 	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
 		atomic_dec(&c->btree_cache.dirty);
@@ -67,12 +62,6 @@ void __bch2_btree_node_wait_on_write(struct btree *);
 void bch2_btree_node_wait_on_read(struct btree *);
 void bch2_btree_node_wait_on_write(struct btree *);
 
-static inline bool btree_node_may_write(struct btree *b)
-{
-	return list_empty_careful(&b->write_blocked) &&
-		(!b->written || !b->will_make_reachable);
-}
-
 enum compact_mode {
 	COMPACT_LAZY,
 	COMPACT_ALL,
@@ -111,22 +100,25 @@ static inline struct nonce btree_nonce(struct bset *i, unsigned offset)
 	}};
 }
 
-static inline void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
+static inline int bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
 {
 	struct nonce nonce = btree_nonce(i, offset);
+	int ret;
 
 	if (!offset) {
 		struct btree_node *bn = container_of(i, struct btree_node, keys);
 		unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
 
-		bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
-			     bytes);
+		ret = bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
+				   &bn->flags, bytes);
+		if (ret)
+			return ret;
 
 		nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
 	}
 
-	bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
-		     vstruct_end(i) - (void *) i->_data);
+	return bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
+			    vstruct_end(i) - (void *) i->_data);
 }
 
 void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
@@ -137,49 +129,32 @@ void bch2_btree_build_aux_trees(struct btree *);
 void bch2_btree_init_next(struct btree_trans *, struct btree *);
 
 int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
-			      struct btree *, bool);
-void bch2_btree_node_read(struct bch_fs *, struct btree *, bool);
+			      struct btree *, bool, bool *);
+void bch2_btree_node_read(struct btree_trans *, struct btree *, bool);
 int bch2_btree_root_read(struct bch_fs *, enum btree_id,
 			 const struct bkey_i *, unsigned);
 
-void bch2_btree_complete_write(struct bch_fs *, struct btree *,
-			       struct btree_write *);
-
-void __bch2_btree_node_write(struct bch_fs *, struct btree *, bool);
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 
+enum btree_write_flags {
+	__BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
+	__BTREE_WRITE_ALREADY_STARTED,
+};
+#define BTREE_WRITE_ONLY_IF_NEED	BIT(__BTREE_WRITE_ONLY_IF_NEED)
+#define BTREE_WRITE_ALREADY_STARTED	BIT(__BTREE_WRITE_ALREADY_STARTED)
+
+void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
-			   enum six_lock_type);
+			   enum six_lock_type, unsigned);
 
 static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
 					    enum six_lock_type lock_held)
 {
-	if (b->written &&
-	    btree_node_need_write(b) &&
-	    btree_node_may_write(b) &&
-	    !btree_node_write_in_flight(b))
-		bch2_btree_node_write(c, b, lock_held);
+	bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
 }
 
-#define bch2_btree_node_write_cond(_c, _b, cond)			\
-do {									\
-	unsigned long old, new, v = READ_ONCE((_b)->flags);		\
-									\
-	do {								\
-		old = new = v;						\
-									\
-		if (!(old & (1 << BTREE_NODE_dirty)) || !(cond))	\
-			break;						\
-									\
-		new |= (1 << BTREE_NODE_need_write);			\
-	} while ((v = cmpxchg(&(_b)->flags, old, new)) != old);	\
-									\
-	btree_node_write_if_need(_c, _b, SIX_LOCK_read);		\
-} while (0)
-
-void bch2_btree_flush_all_reads(struct bch_fs *);
-void bch2_btree_flush_all_writes(struct bch_fs *);
-void bch2_dirty_btree_nodes_to_text(struct printbuf *, struct bch_fs *);
+bool bch2_btree_flush_all_reads(struct bch_fs *);
+bool bch2_btree_flush_all_writes(struct bch_fs *);
 
 static inline void compat_bformat(unsigned level, enum btree_id btree_id,
 				  unsigned version, unsigned big_endian,
@@ -200,7 +175,7 @@ static inline void compat_bformat(unsigned level, enum btree_id btree_id,
 
 		f->field_offset[BKEY_FIELD_SNAPSHOT] = write
 			? 0
-			: U32_MAX - max_packed;
+			: cpu_to_le64(U32_MAX - max_packed);
 	}
 }
 
@@ -222,8 +197,8 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 				     struct btree_node *bn)
 {
 	if (version < bcachefs_metadata_version_inode_btree_change &&
-	    btree_node_type_is_extents(btree_id) &&
-	    bpos_cmp(bn->min_key, POS_MIN) &&
+	    btree_id_is_extents(btree_id) &&
+	    !bpos_eq(bn->min_key, POS_MIN) &&
 	    write)
 		bn->min_key = bpos_nosnap_predecessor(bn->min_key);
 
@@ -239,10 +214,12 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 		bn->max_key.snapshot = U32_MAX;
 
 	if (version < bcachefs_metadata_version_inode_btree_change &&
-	    btree_node_type_is_extents(btree_id) &&
-	    bpos_cmp(bn->min_key, POS_MIN) &&
+	    btree_id_is_extents(btree_id) &&
+	    !bpos_eq(bn->min_key, POS_MIN) &&
 	    !write)
 		bn->min_key = bpos_nosnap_successor(bn->min_key);
 }
 
+void bch2_btree_write_stats_to_text(struct printbuf *, struct bch_fs *);
+
 #endif /* _BCACHEFS_BTREE_IO_H */
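Editor's note on the interface change above (a sketch, not part of the diff): bch2_btree_node_write() now takes a flags word, replacing the removed btree_node_may_write() helper and the bch2_btree_node_write_cond() macro, and bset_encrypt() now returns an int so errors from bch2_encrypt() can be propagated to the caller. The fragment below illustrates how a call site might move to the flags-based interface; the function name example_flush_node() and the choice of SIX_LOCK_read are assumptions made for the example, and it only builds inside the bcachefs tree where "btree_io.h" and its types exist.

#include "btree_io.h"

/*
 * Hypothetical call site, for illustration only.  Before this change the
 * caller open-coded the dirty / may-write / write-in-flight checks (see the
 * lines removed from btree_node_write_if_need() in the diff) and then
 * called bch2_btree_node_write(c, b, lock_held) with no flags argument.
 * The caller is assumed to hold the node's lock in the mode it passes.
 */
static void example_flush_node(struct bch_fs *c, struct btree *b)
{
	/* The "only write if needed" policy is now passed as a flag ... */
	bch2_btree_node_write(c, b, SIX_LOCK_read, BTREE_WRITE_ONLY_IF_NEED);

	/* ... which is exactly what the inline wrapper kept in this header
	 * expands to:
	 *	btree_node_write_if_need(c, b, SIX_LOCK_read);
	 */
}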