/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

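/* What kind of lock, if any, do we currently hold on @level? */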
static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
	return BTREE_NODE_UNLOCKED +
		((path->nodes_locked >> level) & 1) +
		((path->nodes_intent_locked >> level) & 1);
}

static inline bool btree_node_intent_locked(struct btree_path *path,
					    unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path,
					  unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return path->nodes_locked & (1 << level);
}

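/* Bookkeeping only - this does not drop the lock itself: */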
static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	path->nodes_locked &= ~(1 << level);
	path->nodes_intent_locked &= ~(1 << level);
}

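/* Bookkeeping only - the caller must already hold the lock being recorded: */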
static inline void mark_btree_node_locked(struct btree_path *path,
					  unsigned level,
					  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked |= 1 << level;
	path->nodes_intent_locked |= type << level;
}

static inline void mark_btree_node_intent_locked(struct btree_path *path,
						 unsigned level)
{
	mark_btree_node_locked(path, level, SIX_LOCK_intent);
}

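/*
 * The type of lock this path wants at @level: intent for levels below
 * locks_want, read otherwise:
 */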
static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

85
86 static inline enum btree_node_locked_type
87 btree_lock_want(struct btree_path *path, int level)
88 {
89         if (level < path->level)
90                 return BTREE_NODE_UNLOCKED;
91         if (level < path->locks_want)
92                 return BTREE_NODE_INTENT_LOCKED;
93         if (level == path->level)
94                 return BTREE_NODE_READ_LOCKED;
95         return BTREE_NODE_UNLOCKED;
96 }
97
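/* Drop whatever lock we hold on @level, if any, and mark it unlocked: */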
static inline void btree_node_unlock(struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
	mark_btree_node_unlocked(path, level);
}

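/* Drop all locks held by @path - it will have to be relocked before reuse: */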
static inline void __bch2_btree_path_unlock(struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(path, __ffs(path->nodes_locked));
}

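/* Map a six lock type to the time stat we use for tracking lock contention: */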
static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

/*
 * Wrapper around six locks that tracks how long we waited on a contended
 * lock:
 */
static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
					  enum six_lock_type type)
{
	u64 start_time = local_clock();

	six_lock_type(&b->c.lock, type, NULL, NULL);
	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}

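/*
 * Only take the slow path (and record time spent waiting) if the trylock
 * fails:
 */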
static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
					enum six_lock_type type)
{
	if (!six_trylock_type(&b->c.lock, type))
		__btree_node_lock_type(c, b, type);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * paths:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->l[level].b == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->c.lock, want);
			return true;
		}

	return false;
}

bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
			    struct btree *, struct bpos, unsigned,
			    enum six_lock_type,
			    six_lock_should_sleep_fn, void *,
			    unsigned long);

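/*
 * Lock a btree node at @level: first try the uncontended fast path, then try
 * to piggyback on a lock another path in this transaction already holds, and
 * only then take the full slow path, which handles deadlock avoidance and may
 * fail, requiring a transaction restart.
 *
 * A rough usage sketch (illustrative only - bch2_btree_path_traverse() is the
 * real caller and handles this internally):
 *
 *	if (!btree_node_lock(trans, path, b, pos, level, SIX_LOCK_read,
 *			     should_sleep_fn, p, ip))
 *		return -EINTR;
 *	mark_btree_node_locked(path, level, SIX_LOCK_read);
 */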
static inline bool btree_node_lock(struct btree_trans *trans,
			struct btree_path *path,
			struct btree *b, struct bpos pos, unsigned level,
			enum six_lock_type type,
			six_lock_should_sleep_fn should_sleep_fn, void *p,
			unsigned long ip)
{
	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

	return likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(trans, b, level, type) ||
		__bch2_btree_node_lock(trans, path, b, pos, level, type,
				       should_sleep_fn, p, ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);

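/*
 * Re-take a lock we dropped at @level, if the node hasn't been modified since
 * (checked against the saved lock sequence number); a no-op if still locked:
 */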
static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		btree_node_locked_type(path, level) !=
		__btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		__bch2_btree_node_relock(trans, path, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

	trans_for_each_path_with_node(trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);

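/*
 * Take a write lock on @b, for modifying it: we must already hold an intent
 * lock, which excludes other would-be writers:
 */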
static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
					      struct btree_path *path,
					      struct btree *b)
{
	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
	EBUG_ON(!btree_node_intent_locked(path, b->c.level));

	if (unlikely(!six_trylock_write(&b->c.lock)))
		__bch2_btree_node_lock_write(trans, b);
}

#endif /* _BCACHEFS_BTREE_LOCKING_H */