/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

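/*
 * A quick six-lock refresher (summary, not from this file; see
 * <linux/six.h>): six locks have three states - read (shared), intent,
 * and write. Intent locks exclude other intent and write locks but not
 * readers; a thread that wants to modify a node takes an intent lock
 * first and upgrades to a write lock only around the actual modification.
 */
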
/* matches six lock types */
enum btree_node_locked_type {
        BTREE_NODE_UNLOCKED             = -1,
        BTREE_NODE_READ_LOCKED          = SIX_LOCK_read,
        BTREE_NODE_INTENT_LOCKED        = SIX_LOCK_intent,
};

static inline int btree_node_locked_type(struct btree_iter *iter,
                                         unsigned level)
{
        /*
         * We're relying on the fact that if a level's bit is set in
         * nodes_intent_locked, it must also be set in nodes_locked, so
         * that we can compute the lock type without branches:
         */
        return BTREE_NODE_UNLOCKED +
                ((iter->nodes_locked >> level) & 1) +
                ((iter->nodes_intent_locked >> level) & 1);
}
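
/*
 * Worked example (illustrative, not in the original): an intent lock at
 * level 2 sets bit 2 in both bitmasks, so btree_node_locked_type() returns
 * -1 + 1 + 1 == BTREE_NODE_INTENT_LOCKED; a read lock sets only
 * nodes_locked, giving -1 + 1 + 0 == BTREE_NODE_READ_LOCKED; with neither
 * bit set we get -1 == BTREE_NODE_UNLOCKED.
 */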

static inline bool btree_node_intent_locked(struct btree_iter *iter,
                                            unsigned level)
{
        return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_iter *iter,
                                          unsigned level)
{
        return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
{
        return iter->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_iter *iter,
                                            unsigned level)
{
        iter->nodes_locked &= ~(1 << level);
        iter->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_iter *iter,
                                          unsigned level,
                                          enum six_lock_type type)
{
        /* relying on this to avoid a branch */
        BUILD_BUG_ON(SIX_LOCK_read   != 0);
        BUILD_BUG_ON(SIX_LOCK_intent != 1);

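        /*
         * type doubles as the intent bit: SIX_LOCK_read (0) leaves
         * nodes_intent_locked unchanged, SIX_LOCK_intent (1) sets it
         */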
        iter->nodes_locked |= 1 << level;
        iter->nodes_intent_locked |= type << level;
}

static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
                                                 unsigned level)
{
        mark_btree_node_locked(iter, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
{
        return level < iter->locks_want
                ? SIX_LOCK_intent
                : SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_iter *iter, int level)
{
        if (level < iter->level)
                return BTREE_NODE_UNLOCKED;
        if (level < iter->locks_want)
                return BTREE_NODE_INTENT_LOCKED;
        if (level == iter->level)
                return BTREE_NODE_READ_LOCKED;
        return BTREE_NODE_UNLOCKED;
}
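
/*
 * Illustrative example (not in the original): a read-only iterator at
 * level 0 with locks_want == 0 wants a read lock on the leaf and nothing
 * above it; an update iterator with locks_want == 2 wants intent locks on
 * levels 0 and 1. Levels below iter->level are never locked.
 */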

static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
        int lock_type = btree_node_locked_type(iter, level);

        EBUG_ON(level >= BTREE_MAX_DEPTH);

        if (lock_type != BTREE_NODE_UNLOCKED)
                six_unlock_type(&iter->l[level].b->c.lock, lock_type);
        mark_btree_node_unlocked(iter, level);
}

static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
        btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

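        /* __ffs() gives the lowest set bit; each unlock clears its bit */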
        while (iter->nodes_locked)
                btree_node_unlock(iter, __ffs(iter->nodes_locked));
}

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
        switch (type) {
        case SIX_LOCK_read:
                return BCH_TIME_btree_lock_contended_read;
        case SIX_LOCK_intent:
                return BCH_TIME_btree_lock_contended_intent;
        case SIX_LOCK_write:
                return BCH_TIME_btree_lock_contended_write;
        default:
                BUG();
        }
}

/*
 * wrapper around six locks that records how long we waited on a contended
 * lock
 */
static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
                                          enum six_lock_type type)
{
        u64 start_time = local_clock();

        six_lock_type(&b->c.lock, type, NULL, NULL);
        bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}

static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
                                        enum six_lock_type type)
{
        if (!six_trylock_type(&b->c.lock, type))
                __btree_node_lock_type(c, b, type);
}
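
/*
 * Note (observation, not in the original): only the contended path above
 * reads the clock and updates the time stats - a successful trylock skips
 * the accounting entirely, keeping the fast path cheap.
 */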

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
                                             struct btree *b, unsigned level,
                                             enum btree_node_locked_type want)
{
        struct btree_iter *iter;

        trans_for_each_iter(trans, iter)
                if (iter->l[level].b == b &&
                    btree_node_locked_type(iter, level) >= want) {
                        six_lock_increment(&b->c.lock, want);
                        return true;
                }

        return false;
}
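
/*
 * Note (summary, not in the original): six_lock_increment() just takes an
 * extra reference on a lock this transaction already holds, so it can't
 * block or deadlock; passing `want` directly works because the
 * btree_node_locked_type values match the six lock types.
 */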

bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
                            struct btree_iter *, enum six_lock_type,
                            six_lock_should_sleep_fn, void *,
                            unsigned long);

static inline bool btree_node_lock(struct btree *b,
                        struct bpos pos, unsigned level,
                        struct btree_iter *iter,
                        enum six_lock_type type,
                        six_lock_should_sleep_fn should_sleep_fn, void *p,
                        unsigned long ip)
{
        struct btree_trans *trans = iter->trans;

        EBUG_ON(level >= BTREE_MAX_DEPTH);
        EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));

        return likely(six_trylock_type(&b->c.lock, type)) ||
                btree_node_lock_increment(trans, b, level, type) ||
                __bch2_btree_node_lock(b, pos, level, iter, type,
                                       should_sleep_fn, p, ip);
}
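
/*
 * Acquisition order above (observation): an uncontended trylock first,
 * then piggybacking on a lock a linked iterator already holds, and only
 * then the out-of-line __bch2_btree_node_lock() - the slow path that may
 * block, which is why it takes pos and the should_sleep_fn callback.
 */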

bool __bch2_btree_node_relock(struct btree_iter *, unsigned);

static inline bool bch2_btree_node_relock(struct btree_iter *iter,
                                          unsigned level)
{
        EBUG_ON(btree_node_locked(iter, level) &&
                btree_node_locked_type(iter, level) !=
                __btree_lock_want(iter, level));

        return likely(btree_node_locked(iter, level)) ||
                __bch2_btree_node_relock(iter, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock()
 * will succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
{
        struct btree_iter *linked;

        EBUG_ON(iter->l[b->c.level].b != b);
        EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

        trans_for_each_iter_with_node(iter->trans, b, linked)
                linked->l[b->c.level].lock_seq += 2;

        six_unlock_write(&b->c.lock);
}
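
/*
 * Why += 2 (summary of six-lock sequence semantics, not in the original):
 * the lock's seq increments once when the write lock is taken and once
 * when it's released, so after six_unlock_write() the node's seq equals
 * the saved seq + 2. The EBUG_ON above checks that we currently hold the
 * write lock (seq is exactly one past the saved value).
 */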

void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);

void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);

static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
        EBUG_ON(iter->l[b->c.level].b != b);
        EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);

        if (unlikely(!six_trylock_write(&b->c.lock)))
                __bch2_btree_node_lock_write(b, iter);
}
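
/*
 * Note (assumption from six-lock semantics): the caller already holds an
 * intent lock here, so six_trylock_write() can only fail because readers
 * are present; __bch2_btree_node_lock_write() is the path that waits for
 * them to drain.
 */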

#endif /* _BCACHEFS_BTREE_LOCKING_H */