1 #ifndef _BCACHE_BTREE_IO_H
2 #define _BCACHE_BTREE_IO_H
/*
 * btree_node_io_unlock - release the per-node write-in-flight "IO lock":
 * clears the BTREE_NODE_write_in_flight bit in b->flags and wakes any
 * waiters sleeping on it (see btree_node_io_lock()).
 *
 * NOTE(review): the function's braces fall outside this truncated view.
 */
static inline void btree_node_io_unlock(struct btree *b)
	/* Debug-build check: releasing while not held indicates a bug. */
	EBUG_ON(!btree_node_write_in_flight(b));
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
/*
 * btree_node_io_lock - take the per-node write-in-flight "IO lock":
 * sleeps in TASK_UNINTERRUPTIBLE until the BTREE_NODE_write_in_flight
 * bit can be set atomically.  Paired with btree_node_io_unlock().
 *
 * NOTE(review): the function's braces fall outside this truncated view.
 */
static inline void btree_node_io_lock(struct btree *b)
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
	/*
	 * NOTE(review): enum head and remaining members are truncated from
	 * this view; name suggests compaction of already-written bsets
	 * without holding the write lock — confirm against full definition.
	 */
	COMPACT_WRITTEN_NO_WRITE_LOCK,
/*
 * Worker for whiteout compaction; @compact_mode selects the policy
 * (e.g. COMPACT_LAZY from bch_maybe_compact_whiteouts()).  Returns
 * whether compaction was performed.
 */
bool __bch_compact_whiteouts(struct cache_set *, struct btree *, enum compact_mode);
/*
 * bch_maybe_compact_whiteouts - heuristically decide whether @b is worth
 * compacting: when a bset's live u64s have dropped below 3/4 of its
 * on-disk u64s, kick off a lazy compaction via __bch_compact_whiteouts().
 *
 * NOTE(review): the braces and the iteration that binds @t (presumably a
 * loop over b's bsets) are outside this truncated view — confirm in the
 * full file.
 */
static inline bool bch_maybe_compact_whiteouts(struct cache_set *c, struct btree *b)
	/* Keys still live in this bset vs. total keys it holds on disk. */
	unsigned live_u64s = b->nr.bset_u64s[t - b->set];
	unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
	/* Compact once at least 25% of the bset is dead (whiteout) space. */
	if (live_u64s * 4 < bset_u64s * 3)
	return __bch_compact_whiteouts(c, b, COMPACT_LAZY);
/* Sort the keys of one btree node into another (argument roles: see definition). */
void bch_btree_sort_into(struct cache_set *, struct btree *, struct btree *);

/* Rebuild the auxiliary (lookup) search trees for @b's bsets. */
void bch_btree_build_aux_trees(struct btree *);
/* NOTE(review): trailing parameter list truncated from this view. */
void bch_btree_init_next(struct cache_set *, struct btree *,

/* Validate/parse a node read from the given cache device and pointer. */
void bch_btree_node_read_done(struct cache_set *, struct btree *,
			      struct cache *, const struct bch_extent_ptr *);
/* Issue the read for @b's contents. */
void bch_btree_node_read(struct cache_set *, struct btree *);
/* Read the root node of the given btree from the key in the superblock/journal. */
int bch_btree_root_read(struct cache_set *, enum btree_id,
			const struct bkey_i *, unsigned);

/* Tear down accounting for a completed btree node write. */
void bch_btree_complete_write(struct cache_set *, struct btree *,
			      struct btree_write *);

/* Low-level write path; lock type and idx parameter semantics: see definition. */
void __bch_btree_node_write(struct cache_set *, struct btree *,
			    struct closure *, enum six_lock_type, int);
/* Post-write housekeeping; returns whether more work remains (confirm at definition). */
bool bch_btree_post_write_cleanup(struct cache_set *, struct btree *);

/* Public write entry point; wraps __bch_btree_node_write(). */
void bch_btree_node_write(struct cache_set *, struct btree *,
			  struct closure *, enum six_lock_type, int);

/* Flush all dirty btree nodes in the cache set. */
void bch_btree_flush(struct cache_set *);
/* NOTE(review): trailing parameter list truncated from this view. */
void bch_btree_node_flush_journal_entries(struct cache_set *, struct btree *,
73 #endif /* _BCACHE_BTREE_IO_H */