/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

static inline int btree_node_locked_type(struct btree_iter *iter,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
	return BTREE_NODE_UNLOCKED +
		((iter->nodes_locked >> level) & 1) +
		((iter->nodes_intent_locked >> level) & 1);
}

static inline bool btree_node_intent_locked(struct btree_iter *iter,
					    unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_iter *iter,
					  unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
{
	return iter->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_iter *iter,
					    unsigned level)
{
	iter->nodes_locked &= ~(1 << level);
	iter->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_iter *iter,
					  unsigned level,
					  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	iter->nodes_locked |= 1 << level;
	iter->nodes_intent_locked |= type << level;
}

static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
						 unsigned level)
{
	mark_btree_node_locked(iter, level, SIX_LOCK_intent);
}

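/*
 * The lock type we want on a node at @level, given the iterator's locks_want:
 * intent on the levels we intend to modify, read otherwise:
 */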
static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
{
	return level < iter->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

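/*
 * The lock state the iterator should be in at @level: unlocked below the
 * iterator's current level, intent locked on levels below locks_want, read
 * locked at the iterator's own level, unlocked above that:
 */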
static inline enum btree_node_locked_type
btree_lock_want(struct btree_iter *iter, int level)
{
	if (level < iter->level)
		return BTREE_NODE_UNLOCKED;
	if (level < iter->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == iter->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	int lock_type = btree_node_locked_type(iter, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&iter->l[level].b->c.lock, lock_type);
	mark_btree_node_unlocked(iter, level);
}

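/*
 * As __btree_node_unlock(), but asserts we're not dropping the leaf node lock
 * while the transaction has nounlock set:
 */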
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	EBUG_ON(!level && iter->trans->nounlock);

	__btree_node_unlock(iter, level);
}

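/*
 * Drop all locks held by @iter, and mark it as needing to be relocked before
 * it can be used again:
 */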
static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

	while (iter->nodes_locked)
		btree_node_unlock(iter, __ffs(iter->nodes_locked));
}

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

/*
 * Wrapper around six locks that records how long we spent waiting on a
 * contended lock:
 */
static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
					  enum six_lock_type type)
{
	u64 start_time = local_clock();

	six_lock_type(&b->c.lock, type, NULL, NULL);
	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}

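/*
 * Trylock first, so the uncontended case skips the clock reads and stats
 * update in __btree_node_lock_type():
 */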
static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
					enum six_lock_type type)
{
	if (!six_trylock_type(&b->c.lock, type))
		__btree_node_lock_type(c, b, type);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		if (iter->l[level].b == b &&
		    btree_node_locked_type(iter, level) >= want) {
			six_lock_increment(&b->c.lock, want);
			return true;
		}

	return false;
}

bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
			    struct btree_iter *, enum six_lock_type,
			    six_lock_should_sleep_fn, void *);

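/*
 * Lock @b, trying progressively more expensive strategies: an uncontended
 * trylock, then taking a reference to a lock already held by a linked
 * iterator, then the full slowpath in __bch2_btree_node_lock():
 */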
static inline bool btree_node_lock(struct btree *b,
			struct bpos pos, unsigned level,
			struct btree_iter *iter,
			enum six_lock_type type,
			six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	struct btree_trans *trans = iter->trans;
	bool ret;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking		= b;
	trans->locking_iter_idx = iter->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id = iter->btree_id;
	trans->locking_level	= level;
#endif
	ret   = likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(trans, b, level, type) ||
		__bch2_btree_node_lock(b, pos, level, iter, type,
				       should_sleep_fn, p);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking = NULL;
#endif
	return ret;
}

bool __bch2_btree_node_relock(struct btree_iter *, unsigned);

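/*
 * Relock the node at @level: the fast path is that we already hold the lock;
 * the slowpath can only succeed if the node's lock sequence number still
 * matches the one we saved when we last held the lock:
 */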
static inline bool bch2_btree_node_relock(struct btree_iter *iter,
					  unsigned level)
{
	EBUG_ON(btree_node_locked(iter, level) &&
		btree_node_locked_type(iter, level) !=
		__btree_lock_want(iter, level));

	return likely(btree_node_locked(iter, level)) ||
		__bch2_btree_node_relock(iter, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;

	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

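	/*
	 * The lock sequence number is incremented on write lock and again on
	 * write unlock (hence the + 1 assertion above, while the write lock
	 * is still held); bump every linked iterator's saved seq by 2 so it
	 * matches the value after six_unlock_write() below:
	 */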
	trans_for_each_iter_with_node(iter->trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);

void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);

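/*
 * Take the write lock on a node we already have intent locked, falling back
 * to the slowpath if the trylock fails (e.g. readers still hold the lock):
 */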
static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);

	if (unlikely(!six_trylock_write(&b->c.lock)))
		__bch2_btree_node_lock_write(b, iter);
}

#endif /* _BCACHEFS_BTREE_LOCKING_H */