/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};
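
/*
 * A six lock also has a third, write (exclusive) state, but btree node write
 * locks are only held transiently and are never tracked in the path's
 * bitmasks - hence only the read and intent values appear here.
 */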

static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
	return BTREE_NODE_UNLOCKED +
		((path->nodes_locked >> level) & 1) +
		((path->nodes_intent_locked >> level) & 1);
}
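
/*
 * Worked example for the above: an intent lock at @level sets the bit in
 * both bitmasks, giving BTREE_NODE_UNLOCKED (-1) + 1 + 1 ==
 * BTREE_NODE_INTENT_LOCKED; a read lock sets only the nodes_locked bit,
 * giving -1 + 1 + 0 == BTREE_NODE_READ_LOCKED.
 */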

static inline bool btree_node_intent_locked(struct btree_path *path,
					    unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path,
					  unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return path->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	path->nodes_locked &= ~(1 << level);
	path->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_path *path,
					  unsigned level,
					  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked |= 1 << level;
	path->nodes_intent_locked |= type << level;
}
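
/*
 * Since SIX_LOCK_read == 0 and SIX_LOCK_intent == 1 (the BUILD_BUG_ON()s
 * above), "type << level" sets the intent bit only when taking an intent
 * lock - again branchless.
 */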

static inline void mark_btree_node_intent_locked(struct btree_path *path,
						 unsigned level)
{
	mark_btree_node_locked(path, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}
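
/*
 * i.e. a path wants intent locks on levels [level, locks_want), a read lock
 * at its own level when locks_want doesn't reach that far, and no lock on
 * any other level.
 */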

static inline void btree_node_unlock(struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
	mark_btree_node_unlocked(path, level);
}

static inline void __bch2_btree_path_unlock(struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(path, __ffs(path->nodes_locked));
}
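
/*
 * __ffs() returns the lowest set bit, so locks are released starting from
 * the level closest to the leaves.
 */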

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

static inline bool btree_node_lock_type(struct btree_trans *trans,
					struct btree_path *path,
					struct btree *b,
					struct bpos pos, unsigned level,
					enum six_lock_type type,
					six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	struct bch_fs *c = trans->c;
	u64 start_time;
	bool ret;

	if (six_trylock_type(&b->c.lock, type))
		return true;

	start_time = local_clock();

	trans->locking_path_idx = path->idx;
	trans->locking_pos      = pos;
	trans->locking_btree_id = path->btree_id;
	trans->locking_level    = level;
	trans->locking_lock_type = type;
	trans->locking          = b;
	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
	trans->locking = NULL;

	if (ret)
		bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);

	return ret;
}
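
/*
 * While blocked in six_lock_type() the trans->locking* fields record what
 * we're waiting on; they're cleared again once the lock is (or fails to be)
 * acquired. Contended acquisitions are also timed, feeding the per-fs lock
 * contention statistics.
 */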

/*
 * Lock a btree node if we already have it locked on one of our linked
 * paths:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->l[level].b == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->c.lock, want);
			return true;
		}

	return false;
}
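
/*
 * This works because six locks of a given type can be taken recursively:
 * six_lock_increment() just bumps the count on a lock some other path in
 * this transaction already holds with at least the wanted type.
 */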

bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
			    struct btree *, struct bpos, unsigned,
			    enum six_lock_type,
			    six_lock_should_sleep_fn, void *,
			    unsigned long);

static inline bool btree_node_lock(struct btree_trans *trans,
			struct btree_path *path,
			struct btree *b, struct bpos pos, unsigned level,
			enum six_lock_type type,
			six_lock_should_sleep_fn should_sleep_fn, void *p,
			unsigned long ip)
{
	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

	return likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(trans, b, level, type) ||
		__bch2_btree_node_lock(trans, path, b, pos, level, type,
				       should_sleep_fn, p, ip);
}
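
/*
 * Note the ordering of the lock attempt: a cheap trylock first, then
 * piggybacking on a lock already held via another path, and only then the
 * __bch2_btree_node_lock() slowpath, which may sleep.
 */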

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		btree_node_locked_type(path, level) !=
		__btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		__bch2_btree_node_relock(trans, path, level);
}
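
/*
 * Relocking is how a path reacquires locks it previously dropped: the
 * slowpath in __bch2_btree_node_relock() can only succeed if the node hasn't
 * been write locked since we saved its lock sequence number.
 */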

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

	trans_for_each_path_with_node(trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}
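
/*
 * Taking and releasing a six write lock each bump the lock's sequence number
 * (hence the lock_seq + 1 assertion above, which runs while the write lock
 * is still held); bumping each path's saved seq by 2 keeps it matching the
 * post-unlock value, so bch2_btree_node_relock() will still succeed.
 */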

void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);

static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
					      struct btree_path *path,
					      struct btree *b)
{
	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
	EBUG_ON(!btree_node_intent_locked(path, b->c.level));

	if (unlikely(!six_trylock_write(&b->c.lock)))
		__bch2_btree_node_lock_write(trans, b);
}
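
/*
 * Taking a write lock requires that we already hold an intent lock on the
 * node (enforced by the EBUG_ON()s above); the __bch2_btree_node_lock_write()
 * slowpath has to wait for outstanding readers to drain.
 */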

#endif /* _BCACHEFS_BTREE_LOCKING_H */