/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

/* matches six lock types */
enum btree_node_locked_type {
        BTREE_NODE_UNLOCKED             = -1,
        BTREE_NODE_READ_LOCKED          = SIX_LOCK_read,
        BTREE_NODE_INTENT_LOCKED        = SIX_LOCK_intent,
};

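/*
 * Lock state is tracked as two per-path bitmasks, one bit per btree level:
 * a bit set in nodes_locked means that level is locked at all; the same bit
 * also set in nodes_intent_locked means the lock held is an intent lock.
 */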
static inline int btree_node_locked_type(struct btree_path *path,
                                         unsigned level)
{
        /*
         * We're relying on the fact that if nodes_intent_locked is set
         * nodes_locked must be set as well, so that we can compute without
         * branches:
         */
        return BTREE_NODE_UNLOCKED +
                ((path->nodes_locked >> level) & 1) +
                ((path->nodes_intent_locked >> level) & 1);
}
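
/*
 * For example: with only the nodes_locked bit set for a level, this computes
 * -1 + 1 + 0 == SIX_LOCK_read; with both bits set, -1 + 1 + 1 ==
 * SIX_LOCK_intent; with neither, BTREE_NODE_UNLOCKED.
 */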

static inline bool btree_node_intent_locked(struct btree_path *path,
                                            unsigned level)
{
        return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path,
                                          unsigned level)
{
        return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
        return path->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
                                            unsigned level)
{
        path->nodes_locked &= ~(1 << level);
        path->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
                                          struct btree_path *path,
                                          unsigned level,
                                          enum six_lock_type type)
{
        /* relying on this to avoid a branch */
        BUILD_BUG_ON(SIX_LOCK_read   != 0);
        BUILD_BUG_ON(SIX_LOCK_intent != 1);

        BUG_ON(trans->in_traverse_all && path->sorted_idx > trans->traverse_all_idx);

        path->nodes_locked |= 1 << level;
        path->nodes_intent_locked |= type << level;
}
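
/*
 * Note that because SIX_LOCK_read is 0 and SIX_LOCK_intent is 1,
 * "type << level" sets the intent bit only for intent locks - this is the
 * branch the BUILD_BUG_ONs above let us avoid.
 */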

static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
                                                 struct btree_path *path,
                                                 unsigned level)
{
        mark_btree_node_locked(trans, path, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
        return level < path->locks_want
                ? SIX_LOCK_intent
                : SIX_LOCK_read;
}

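/*
 * The lock state a traversal wants at a given level: nodes below the path's
 * own level are never locked, levels in [path->level, locks_want) want intent
 * locks, the path's level itself wants at least a read lock, and anything
 * higher is left unlocked.
 */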
static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
        if (level < path->level)
                return BTREE_NODE_UNLOCKED;
        if (level < path->locks_want)
                return BTREE_NODE_INTENT_LOCKED;
        if (level == path->level)
                return BTREE_NODE_READ_LOCKED;
        return BTREE_NODE_UNLOCKED;
}

static inline void btree_node_unlock(struct btree_path *path, unsigned level)
{
        int lock_type = btree_node_locked_type(path, level);

        EBUG_ON(level >= BTREE_MAX_DEPTH);

        if (lock_type != BTREE_NODE_UNLOCKED)
                six_unlock_type(&path->l[level].b->c.lock, lock_type);
        mark_btree_node_unlocked(path, level);
}

static inline void __bch2_btree_path_unlock(struct btree_path *path)
{
        btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

        while (path->nodes_locked)
                btree_node_unlock(path, __ffs(path->nodes_locked));
}

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
        switch (type) {
        case SIX_LOCK_read:
                return BCH_TIME_btree_lock_contended_read;
        case SIX_LOCK_intent:
                return BCH_TIME_btree_lock_contended_intent;
        case SIX_LOCK_write:
                return BCH_TIME_btree_lock_contended_write;
        default:
                BUG();
        }
}

static inline bool btree_node_lock_type(struct btree_trans *trans,
                                        struct btree_path *path,
                                        struct btree *b,
                                        struct bpos pos, unsigned level,
                                        enum six_lock_type type,
                                        six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        struct bch_fs *c = trans->c;
        u64 start_time;
        bool ret;

        if (six_trylock_type(&b->c.lock, type))
                return true;

        start_time = local_clock();

        trans->locking_path_idx = path->idx;
        trans->locking_pos      = pos;
        trans->locking_btree_id = path->btree_id;
        trans->locking_level    = level;
        trans->locking_lock_type = type;
        trans->locking          = b;
        ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
        trans->locking = NULL;

        if (ret)
                bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);

        return ret;
}
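
/*
 * While we're blocked, the trans->locking* fields record which node, position
 * and lock type this transaction is waiting on, for the benefit of debugging
 * and deadlock-reporting code elsewhere; contended-lock time stats are only
 * updated once the lock is actually acquired.
 */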

/*
 * Lock a btree node if we already have it locked on one of this
 * transaction's linked paths:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
                                             struct btree *b, unsigned level,
                                             enum btree_node_locked_type want)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (path->l[level].b == b &&
                    btree_node_locked_type(path, level) >= want) {
                        six_lock_increment(&b->c.lock, want);
                        return true;
                }

        return false;
}
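
/*
 * six_lock_increment(), used above, takes an additional reference of the
 * given type on a lock this transaction already holds, so unlike a fresh
 * lock attempt it can neither block nor deadlock.
 */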

bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
                            struct btree *, struct bpos, unsigned,
                            enum six_lock_type,
                            six_lock_should_sleep_fn, void *,
                            unsigned long);

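/*
 * Lock a node: try the uncontended fast path first, then piggyback on a lock
 * another of this transaction's paths already holds, and only then fall back
 * to the out-of-line slow path, which handles deadlock avoidance.
 */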
static inline bool btree_node_lock(struct btree_trans *trans,
                        struct btree_path *path,
                        struct btree *b, struct bpos pos, unsigned level,
                        enum six_lock_type type,
                        six_lock_should_sleep_fn should_sleep_fn, void *p,
                        unsigned long ip)
{
        EBUG_ON(level >= BTREE_MAX_DEPTH);
        EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

        return likely(six_trylock_type(&b->c.lock, type)) ||
                btree_node_lock_increment(trans, b, level, type) ||
                __bch2_btree_node_lock(trans, path, b, pos, level, type,
                                       should_sleep_fn, p, ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
                                          struct btree_path *path, unsigned level)
{
        EBUG_ON(btree_node_locked(path, level) &&
                btree_node_locked_type(path, level) !=
                __btree_lock_want(path, level));

        return likely(btree_node_locked(path, level)) ||
                __bch2_btree_node_relock(trans, path, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
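/*
 * (The six lock sequence number was incremented when the write lock was
 * taken - the EBUG_ON below checks this - and is incremented again when it is
 * dropped, so bumping each linked path's saved seq by 2 matches the value the
 * lock will have after six_unlock_write().)
 */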
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
                                     struct btree *b)
{
        struct btree_path *linked;

        EBUG_ON(path->l[b->c.level].b != b);
        EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

        trans_for_each_path_with_node(trans, b, linked)
                linked->l[b->c.level].lock_seq += 2;

        six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree_trans *,
                        struct btree_path *, struct btree *);

void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);

static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
                                              struct btree_path *path,
                                              struct btree *b)
{
        EBUG_ON(path->l[b->c.level].b != b);
        EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
        EBUG_ON(!btree_node_intent_locked(path, b->c.level));

        if (unlikely(!six_trylock_write(&b->c.lock)))
                __bch2_btree_node_lock_write(trans, b);
}
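
/*
 * Note: upgrading intent -> write can't deadlock, since only one thread can
 * hold an intent lock at a time; the out-of-line slow path should only have
 * to wait for readers to drain.
 */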

#endif /* _BCACHEFS_BTREE_LOCKING_H */