#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

static inline int btree_node_locked_type(struct btree_iter *iter,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
	return BTREE_NODE_UNLOCKED +
		((iter->nodes_locked >> level) & 1) +
		((iter->nodes_intent_locked >> level) & 1);
}
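
/*
 * Worked example of the branchless computation above, for a given level's
 * bits:
 *
 *	nodes_locked  nodes_intent_locked  result
 *	     0                 0           -1 + 0 + 0 = BTREE_NODE_UNLOCKED
 *	     1                 0           -1 + 1 + 0 = BTREE_NODE_READ_LOCKED
 *	     1                 1           -1 + 1 + 1 = BTREE_NODE_INTENT_LOCKED
 *
 * The (0, 1) combination never occurs: holding an intent lock implies the
 * corresponding nodes_locked bit is set.
 */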

static inline bool btree_node_intent_locked(struct btree_iter *iter,
					    unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_iter *iter,
					  unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
{
	return iter->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_iter *iter,
					    unsigned level)
{
	iter->nodes_locked &= ~(1 << level);
	iter->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_iter *iter,
					  unsigned level,
					  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	iter->nodes_locked |= 1 << level;
	iter->nodes_intent_locked |= type << level;
}
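
/*
 * The BUILD_BUG_ONs above are what make "type << level" work: SIX_LOCK_read
 * is 0, so a read lock sets no bit in nodes_intent_locked, while
 * SIX_LOCK_intent is 1 and sets exactly bit "level" - preserving the
 * invariant that btree_node_locked_type() relies on.
 */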

static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
						 unsigned level)
{
	mark_btree_node_locked(iter, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
{
	return level < iter->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_iter *iter, int level)
{
	if (level < iter->level)
		return BTREE_NODE_UNLOCKED;
	if (level < iter->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == iter->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}
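
/*
 * Example: with iter->level == 0 and iter->locks_want == 2, btree_lock_want()
 * returns BTREE_NODE_INTENT_LOCKED for levels 0 and 1 and
 * BTREE_NODE_UNLOCKED for everything higher. With iter->level == 1 and
 * iter->locks_want == 1, level 0 is below the iterator (unlocked), level 1 is
 * the iterator's own node (read locked), and all ancestors are unlocked.
 */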

static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	int lock_type = btree_node_locked_type(iter, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&iter->l[level].b->lock, lock_type);
	mark_btree_node_unlocked(iter, level);
}

static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	BUG_ON(!level && iter->flags & BTREE_ITER_NOUNLOCK);

	__btree_node_unlock(iter, level);
}

static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

	while (iter->nodes_locked)
		btree_node_unlock(iter, __ffs(iter->nodes_locked));
}
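
/*
 * __ffs() returns the lowest set bit, so the loop above releases locks from
 * the leaf level upwards until no levels remain locked; the iterator is
 * marked BTREE_ITER_NEED_RELOCK first so a later traversal knows it has to
 * retake (or revalidate) its locks before using the iterator again.
 */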

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

/*
 * wrapper around six locks that just tracks lock contended time
 */
static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
					  enum six_lock_type type)
{
	u64 start_time = local_clock();

	six_lock_type(&b->lock, type);
	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}

static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
					enum six_lock_type type)
{
	if (!six_trylock_type(&b->lock, type))
		__btree_node_lock_type(c, b, type);
}
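
/*
 * The six_trylock_type() fast path above keeps the timing overhead
 * (local_clock() plus the stats update) entirely off the uncontended path:
 * only acquisitions that actually had to wait are counted towards the
 * contended-time statistics.
 */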

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_iter *iter,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (linked != iter &&
		    linked->l[level].b == b &&
		    btree_node_locked_type(linked, level) >= want) {
			six_lock_increment(&b->lock, want);
			return true;
		}

	return false;
}
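
/*
 * six_lock_increment() takes an additional reference on a lock type this
 * task already holds, without blocking - safe here because we just verified
 * that a linked iterator in the same transaction holds b at least as
 * strongly as @want. This is what lets multiple iterators in one btree
 * transaction share locks instead of deadlocking against each other.
 */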

bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
			    struct btree_iter *, enum six_lock_type, bool);

static inline bool btree_node_lock(struct btree *b, struct bpos pos,
				   unsigned level,
				   struct btree_iter *iter,
				   enum six_lock_type type,
				   bool may_drop_locks)
{
	EBUG_ON(level >= BTREE_MAX_DEPTH);

	return likely(six_trylock_type(&b->lock, type)) ||
		btree_node_lock_increment(iter, b, level, type) ||
		__bch2_btree_node_lock(b, pos, level, iter,
				       type, may_drop_locks);
}
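
/*
 * Hypothetical caller sketch (not from this file): take the lock wanted at
 * @level and record it in the iterator, bailing out if the slow path failed
 * (e.g. because proceeding would have deadlocked):
 *
 *	enum six_lock_type type = __btree_lock_want(iter, level);
 *
 *	if (!btree_node_lock(b, pos, level, iter, type, true))
 *		return false;	// caller must unwind and retraverse
 *	mark_btree_node_locked(iter, level, type);
 */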

bool __bch2_btree_node_relock(struct btree_iter *, unsigned);

static inline bool bch2_btree_node_relock(struct btree_iter *iter,
					  unsigned level)
{
	EBUG_ON(btree_node_locked(iter, level) &&
		btree_node_locked_type(iter, level) !=
		__btree_lock_want(iter, level));

	return likely(btree_node_locked(iter, level)) ||
		__bch2_btree_node_relock(iter, level);
}
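
/*
 * The slow path, __bch2_btree_node_relock(), presumably retakes the lock and
 * validates it against the sequence number saved in iter->l[level].lock_seq
 * (the same check bch2_btree_node_lock_write() makes below); if the node was
 * modified or freed in the meantime, the relock fails and the iterator has
 * to be retraversed.
 */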

void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);

void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);

static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	EBUG_ON(iter->l[b->level].b != b);
	EBUG_ON(iter->l[b->level].lock_seq != b->lock.state.seq);

	if (!six_trylock_write(&b->lock))
		__bch2_btree_node_lock_write(b, iter);
}
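
/*
 * In the six lock model, a write lock is taken while already holding an
 * intent lock: the EBUG_ONs above assert that the iterator really holds b,
 * with the lock sequence number it saw when it locked it, and the slow path
 * presumably only has to wait for concurrent readers to drain rather than
 * compete for ownership of the node.
 */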

#endif /* _BCACHEFS_BTREE_LOCKING_H */