]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/btree_io.h
Merge pull request #24 from brendon-boldt/new-install-distros
[bcachefs-tools-debian] / libbcachefs / btree_io.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BTREE_IO_H
3 #define _BCACHEFS_BTREE_IO_H
4
5 #include "bset.h"
6 #include "btree_locking.h"
7 #include "extents.h"
8 #include "io_types.h"
9
10 struct bch_fs;
11 struct btree_write;
12 struct btree;
13 struct btree_iter;
14
/*
 * State for an in-flight btree node read; completion work runs out of
 * @work, with the bio embedded at the end.
 */
struct btree_read_bio {
	struct bch_fs		*c;
	u64			start_time;	/* read start, for time stats — presumably ns; confirm at use site */
	unsigned		have_ioref:1;	/* set if we hold an ioref on the target device — verify against submit path */
	struct extent_ptr_decoded	pick;	/* which pointer/replica was picked for this read */
	struct work_struct	work;		/* deferred completion handler */
	struct bio		bio;		/* keep last: bio looks variable-sized (embedded biovecs) — TODO confirm */
};
23
/*
 * State for an in-flight btree node write; completion work runs out of
 * @work, with the write bio embedded at the end.
 */
struct btree_write_bio {
	void			*data;	/* buffer being written — ownership/lifetime not visible here; see definition of the write path */
	struct work_struct	work;	/* deferred completion handler */
	struct bch_write_bio	wbio;	/* keep last if bch_write_bio embeds a variable-sized bio — TODO confirm */
};
29
/*
 * Mark the in-flight write on @b as done and wake anyone sleeping on it
 * (btree_node_io_lock()/btree_node_wait_on_io()).
 *
 * Order matters: the bit must be cleared before waking waiters, or a
 * woken waiter could observe the bit still set and go back to sleep.
 */
static inline void btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
36
/*
 * Acquire exclusive "IO ownership" of @b by setting the
 * write_in_flight bit, sleeping uninterruptibly until any current
 * holder clears it. Paired with btree_node_io_unlock().
 */
static inline void btree_node_io_lock(struct btree *b)
{
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}
42
/*
 * Sleep uninterruptibly until the in-flight write on @b (if any)
 * completes. Unlike btree_node_io_lock(), does not take ownership of
 * the bit.
 */
static inline void btree_node_wait_on_io(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}
48
49 static inline bool btree_node_may_write(struct btree *b)
50 {
51         return list_empty_careful(&b->write_blocked) &&
52                 (!b->written || !b->will_make_reachable);
53 }
54
/* How aggressively bch2_compact_whiteouts() should compact: */
enum compact_mode {
	COMPACT_LAZY,	/* only bsets worth it per should_compact_bset_lazy() — confirm at definition */
	COMPACT_ALL,	/* presumably every bset unconditionally — verify at definition */
};
59
/*
 * Compact dead keys/whiteouts out of a node's bsets; return value
 * presumably indicates whether anything was compacted — TODO confirm
 * at the definition.
 */
bool bch2_compact_whiteouts(struct bch_fs *, struct btree *,
			    enum compact_mode);
62
63 static inline bool should_compact_bset_lazy(struct btree *b,
64                                             struct bset_tree *t)
65 {
66         unsigned total_u64s = bset_u64s(t);
67         unsigned dead_u64s = bset_dead_u64s(b, t);
68
69         return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
70 }
71
72 static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
73 {
74         struct bset_tree *t;
75
76         for_each_bset(b, t)
77                 if (should_compact_bset_lazy(b, t))
78                         return bch2_compact_whiteouts(c, b, COMPACT_LAZY);
79
80         return false;
81 }
82
/* Sort keys from one node into another — argument order not visible here; check the definition */
void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);

/* Rebuild the auxiliary search trees over @b's bsets */
void bch2_btree_build_aux_trees(struct btree *);
/* Start a new bset in the node — presumably after the current one is full/written; confirm at definition */
void bch2_btree_init_next(struct bch_fs *, struct btree *,
			 struct btree_iter *);

/* Validate a node after read; int return suggests 0/-error convention — TODO confirm */
int bch2_btree_node_read_done(struct bch_fs *, struct btree *, bool);
/* Submit a read for a btree node — bool parameter's meaning not visible here */
void bch2_btree_node_read(struct bch_fs *, struct btree *, bool);
/* Read a btree root from the given key; 0/-error convention presumed — confirm */
int bch2_btree_root_read(struct bch_fs *, enum btree_id,
			 const struct bkey_i *, unsigned);

/* Finish/tear down state for a completed btree node write */
void bch2_btree_complete_write(struct bch_fs *, struct btree *,
			      struct btree_write *);
/* Workqueue handler for btree write errors */
void bch2_btree_write_error_work(struct work_struct *);

/* Write out a node; caller holds the node lock in the given SIX mode (see btree_node_write_if_need()) */
void __bch2_btree_node_write(struct bch_fs *, struct btree *,
			    enum six_lock_type);
/* Post-write bookkeeping; return semantics not visible here — check definition */
bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);

/* Locked wrapper around __bch2_btree_node_write() — presumed; verify */
void bch2_btree_node_write(struct bch_fs *, struct btree *,
			  enum six_lock_type);
104
/*
 * Write out @b if it's eligible and needs it. Called with the node's
 * SIX lock held for read; the lock may be dropped and retaken inside
 * the loop, so flags can change between iterations (hence re-checking
 * the loop condition each time).
 */
static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b)
{
	while (b->written &&
	       btree_node_need_write(b) &&
	       btree_node_may_write(b)) {
		if (!btree_node_write_in_flight(b)) {
			/* no write in flight: start one and we're done */
			bch2_btree_node_write(c, b, SIX_LOCK_read);
			break;
		}

		/*
		 * A write is already in flight; we can't sleep on it
		 * while holding the lock, so drop the lock, wait for
		 * the write to finish, relock, and re-evaluate.
		 */
		six_unlock_read(&b->lock);
		btree_node_wait_on_io(b);
		btree_node_lock_type(c, b, SIX_LOCK_read);
	}
}
120
/*
 * If @_b is dirty and @cond holds, atomically set its need_write flag
 * and then attempt the write via btree_node_write_if_need().
 *
 * The cmpxchg loop is needed because other flag bits may be updated
 * concurrently; we retry until our read of ->flags wasn't stale.
 *
 * NOTE: @cond is re-evaluated on every retry of the cmpxchg loop —
 * it must be cheap and safe to evaluate multiple times.
 */
#define bch2_btree_node_write_cond(_c, _b, cond)			\
do {									\
	unsigned long old, new, v = READ_ONCE((_b)->flags);		\
									\
	do {								\
		old = new = v;						\
									\
		if (!(old & (1 << BTREE_NODE_dirty)) || !(cond))	\
			break;						\
									\
		new |= (1 << BTREE_NODE_need_write);			\
	} while ((v = cmpxchg(&(_b)->flags, old, new)) != old);		\
									\
	btree_node_write_if_need(_c, _b);				\
} while (0)
136
/* Wait for all outstanding btree node reads to complete — presumed from name; confirm at definition */
void bch2_btree_flush_all_reads(struct bch_fs *);
/* Flush all pending/in-flight btree node writes — presumed from name; confirm at definition */
void bch2_btree_flush_all_writes(struct bch_fs *);
/* Debug/assert helper: check that dirty nodes were flushed — semantics not visible here */
void bch2_btree_verify_flushed(struct bch_fs *);
/* Format a listing of dirty btree nodes into @buf (sysfs-style); returns bytes written */
ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *, char *);
141
142 #endif /* _BCACHEFS_BTREE_IO_H */