/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
        return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

/* matches six lock types */
enum btree_node_locked_type {
        BTREE_NODE_UNLOCKED             = -1,
        BTREE_NODE_READ_LOCKED          = SIX_LOCK_read,
        BTREE_NODE_INTENT_LOCKED        = SIX_LOCK_intent,
};

static inline int btree_node_locked_type(struct btree_path *path,
                                         unsigned level)
{
        /*
         * We're relying on the fact that if nodes_intent_locked is set,
         * nodes_locked must be set as well, so that we can compute without
         * branches:
         */
        return BTREE_NODE_UNLOCKED +
                ((path->nodes_locked >> level) & 1) +
                ((path->nodes_intent_locked >> level) & 1);
}
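
/*
 * Worked example of the branchless computation above: with
 * nodes_locked == 0b11 and nodes_intent_locked == 0b01, level 0 gives
 * -1 + 1 + 1 == BTREE_NODE_INTENT_LOCKED, level 1 gives
 * -1 + 1 + 0 == BTREE_NODE_READ_LOCKED, and a clear bit at level 2 gives
 * -1 + 0 + 0 == BTREE_NODE_UNLOCKED.
 */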

static inline bool btree_node_intent_locked(struct btree_path *path,
                                            unsigned level)
{
        return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path,
                                          unsigned level)
{
        return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
        return path->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
                                            unsigned level)
{
        path->nodes_locked &= ~(1 << level);
        path->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked_noreset(struct btree_trans *trans,
                                          struct btree_path *path,
                                          unsigned level,
                                          enum six_lock_type type)
{
        /* relying on this to avoid a branch */
        BUILD_BUG_ON(SIX_LOCK_read   != 0);
        BUILD_BUG_ON(SIX_LOCK_intent != 1);

        BUG_ON(trans->in_traverse_all && path->sorted_idx > trans->traverse_all_idx);

        path->nodes_locked |= 1 << level;
        path->nodes_intent_locked |= type << level;
}
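
/*
 * Illustration of the update above: marking level 2 with SIX_LOCK_intent
 * (== 1) sets bit 2 in both bitmasks, while SIX_LOCK_read (== 0) sets it
 * only in nodes_locked - exactly the invariant btree_node_locked_type()
 * depends on.
 */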

static inline void mark_btree_node_locked(struct btree_trans *trans,
                                          struct btree_path *path,
                                          unsigned level,
                                          enum six_lock_type type)
{
        mark_btree_node_locked_noreset(trans, path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
        path->l[level].lock_taken_time = ktime_get_ns();
#endif
}

static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
                                                 struct btree_path *path,
                                                 unsigned level)
{
        mark_btree_node_locked_noreset(trans, path, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
        return level < path->locks_want
                ? SIX_LOCK_intent
                : SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
        if (level < path->level)
                return BTREE_NODE_UNLOCKED;
        if (level < path->locks_want)
                return BTREE_NODE_INTENT_LOCKED;
        if (level == path->level)
                return BTREE_NODE_READ_LOCKED;
        return BTREE_NODE_UNLOCKED;
}
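
/*
 * Example: with path->level == 1 and path->locks_want == 2, level 0 wants
 * BTREE_NODE_UNLOCKED (below the path), level 1 wants
 * BTREE_NODE_INTENT_LOCKED (1 < locks_want) and level 2 wants
 * BTREE_NODE_UNLOCKED again; with locks_want == 1 instead, level 1 would
 * want BTREE_NODE_READ_LOCKED.
 */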

static inline void btree_node_unlock(struct btree_trans *trans,
                                     struct btree_path *path, unsigned level)
{
        int lock_type = btree_node_locked_type(path, level);

        EBUG_ON(level >= BTREE_MAX_DEPTH);

        if (lock_type != BTREE_NODE_UNLOCKED) {
                six_unlock_type(&path->l[level].b->c.lock, lock_type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
                if (trans->lock_name_idx < BCH_LOCK_TIME_NR) {
                        struct bch_fs *c = trans->c;

                        __bch2_time_stats_update(&c->lock_held_stats.times[trans->lock_name_idx],
                                                 path->l[level].lock_taken_time,
                                                 ktime_get_ns());
                }
#endif
        }
        mark_btree_node_unlocked(path, level);
}

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
                                            struct btree_path *path)
{
        btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

        while (path->nodes_locked)
                btree_node_unlock(trans, path, __ffs(path->nodes_locked));
}
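
/*
 * __ffs() returns the index of the lowest set bit, so the loop above
 * releases locks from the leaf level upward until nodes_locked is empty.
 */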

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
        switch (type) {
        case SIX_LOCK_read:
                return BCH_TIME_btree_lock_contended_read;
        case SIX_LOCK_intent:
                return BCH_TIME_btree_lock_contended_intent;
        case SIX_LOCK_write:
                return BCH_TIME_btree_lock_contended_write;
        default:
                BUG();
        }
}

static inline int btree_node_lock_type(struct btree_trans *trans,
                                       struct btree_path *path,
                                       struct btree *b,
                                       struct bpos pos, unsigned level,
                                       enum six_lock_type type,
                                       six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        struct bch_fs *c = trans->c;
        u64 start_time;
        int ret;

        if (six_trylock_type(&b->c.lock, type))
                return 0;

        start_time = local_clock();

        trans->locking_path_idx = path->idx;
        trans->locking_pos      = pos;
        trans->locking_btree_id = path->btree_id;
        trans->locking_level    = level;
        trans->locking_lock_type = type;
        trans->locking          = &b->c;
        ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p);
        trans->locking = NULL;

        if (ret)
                return ret;

        bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
        return 0;
}
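
/*
 * Note the shape above: six_trylock_type() keeps the uncontended case
 * cheap; only when we're about to block do we record what we're waiting
 * on in the trans->locking* fields (presumably so it can be inspected
 * while we sleep) and charge the wait to the contended-lock time stats.
 */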

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
                                             struct btree *b, unsigned level,
                                             enum btree_node_locked_type want)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (path->l[level].b == b &&
                    btree_node_locked_type(path, level) >= want) {
                        six_lock_increment(&b->c.lock, want);
                        return true;
                }

        return false;
}
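
/*
 * E.g. if another path in this transaction already holds an intent lock
 * on b at this level, a want of BTREE_NODE_READ_LOCKED is satisfied by
 * taking another reference via six_lock_increment() instead of blocking;
 * the >= comparison works because the enum orders read below intent.
 */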

int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
                           struct btree *, struct bpos, unsigned,
                           enum six_lock_type,
                           six_lock_should_sleep_fn, void *,
                           unsigned long);

static inline int btree_node_lock(struct btree_trans *trans,
                        struct btree_path *path,
                        struct btree *b, struct bpos pos, unsigned level,
                        enum six_lock_type type,
                        six_lock_should_sleep_fn should_sleep_fn, void *p,
                        unsigned long ip)
{
        int ret = 0;

        EBUG_ON(level >= BTREE_MAX_DEPTH);
        EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

        if (likely(six_trylock_type(&b->c.lock, type)) ||
            btree_node_lock_increment(trans, b, level, type) ||
            !(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
                                           should_sleep_fn, p, ip))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
                path->l[b->c.level].lock_taken_time = ktime_get_ns();
#endif
        }

        return ret;
}
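
/*
 * Sketch of a typical call (hypothetical caller and arguments; the real
 * callers live in the btree iterator code):
 *
 *        ret = btree_node_lock(trans, path, b, pos, level, SIX_LOCK_read,
 *                              should_sleep_fn, p, _THIS_IP_);
 *        if (ret)
 *                return ret;
 */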

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
                                          struct btree_path *path, unsigned level)
{
        EBUG_ON(btree_node_locked(path, level) &&
                btree_node_locked_type(path, level) !=
                __btree_lock_want(path, level));

        return likely(btree_node_locked(path, level)) ||
                __bch2_btree_node_relock(trans, path, level);
}
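
/*
 * Typical use (illustrative, not from this file): after a blocking
 * operation, verify the level is still locked before trusting cached
 * node pointers:
 *
 *        if (!bch2_btree_node_relock(trans, path, level))
 *                goto err;
 *
 * where the error path restarts the transaction.
 */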

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
                                     struct btree *b)
{
        struct btree_path *linked;

        EBUG_ON(path->l[b->c.level].b != b);
        EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

        trans_for_each_path_with_node(trans, b, linked)
                linked->l[b->c.level].lock_seq += 2;

        six_unlock_write(&b->c.lock);
}
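
/*
 * Why += 2: a six lock's sequence number is bumped once when the write
 * lock is taken and once when it's released, so while it's held the saved
 * lock_seq trails the live seq by one (the EBUG_ON above). Adding 2 makes
 * every linked path's saved seq match the post-unlock value, which is
 * what lets bch2_btree_node_relock() succeed.
 */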

void bch2_btree_node_unlock_write(struct btree_trans *,
                        struct btree_path *, struct btree *);

void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);

static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
                                              struct btree_path *path,
                                              struct btree *b)
{
        EBUG_ON(path->l[b->c.level].b != b);
        EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
        EBUG_ON(!btree_node_intent_locked(path, b->c.level));

        if (unlikely(!six_trylock_write(&b->c.lock)))
                __bch2_btree_node_lock_write(trans, b);
}

static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
        EBUG_ON(!btree_node_locked(path, path->level));
        EBUG_ON(path->uptodate);

        path->should_be_locked = true;
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
                                      struct btree_path *path,
                                      unsigned l)
{
        btree_node_unlock(trans, path, l);
        path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
                                    struct btree_path *path)
{
        __btree_path_set_level_up(trans, path, path->level++);
        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}
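
/*
 * Both helpers above drop any lock held at the level being abandoned and
 * poison the node pointer with an error sentinel;
 * btree_path_set_level_up() additionally moves the path up one level and
 * flags it for re-traversal.
 */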

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
                                struct btree_path *, struct btree *, unsigned);

#endif /* _BCACHEFS_BTREE_LOCKING_H */