1 #ifndef _BCACHEFS_SIX_H
2 #define _BCACHEFS_SIX_H
5 * Shared/intent/exclusive locks: sleepable read/write locks, much like rw
6 * semaphores, except with a third intermediate state, intent. Basic operations
9 * six_lock_read(&foo->lock);
10 * six_unlock_read(&foo->lock);
12 * six_lock_intent(&foo->lock);
13 * six_unlock_intent(&foo->lock);
15 * six_lock_write(&foo->lock);
16 * six_unlock_write(&foo->lock);
18 * Intent locks block other intent locks, but do not block read locks, and you
19 * must have an intent lock held before taking a write lock, like so:
21 * six_lock_intent(&foo->lock);
22 * six_lock_write(&foo->lock);
23 * six_unlock_write(&foo->lock);
24 * six_unlock_intent(&foo->lock);
29 * six_trylock_intent()
32 * six_lock_downgrade(): convert from intent to read
33 * six_lock_tryupgrade(): attempt to convert from read to intent
35 * Locks also embed a sequence number, which is incremented when the lock is
36 * locked or unlocked for write. The current sequence number can be grabbed
37 * while a lock is held from lock->state.seq; then, if you drop the lock you can
38 * use six_relock_(read|intent|write)(lock, seq) to attempt to retake the lock
39 * iff it hasn't been locked for write in the meantime.
41 * There are also operations that take the lock type as a parameter, where the
42 * type is one of SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write:
44 * six_lock_type(lock, type)
45 * six_unlock_type(lock, type)
46 * six_relock(lock, type, seq)
47 * six_trylock_type(lock, type)
48 * six_trylock_convert(lock, from, to)
50 * A lock may be held multiple times by the same thread (for read or intent,
51 * not write) - up to SIX_LOCK_MAX_RECURSE. However, the six locks code does
52 * _not_ implement the actual recursive checks itself though - rather, if your
53 * code (e.g. btree iterator code) knows that the current thread already has a
54 * lock held, and for the correct type, six_lock_increment() may be used to
55 * bump up the counter for that type - the only effect is that one more call to
56 * unlock will be required before the lock is unlocked.
59 #include <linux/lockdep.h>
60 #include <linux/osq_lock.h>
61 #include <linux/sched.h>
62 #include <linux/types.h>
66 #define SIX_LOCK_SEPARATE_LOCKFNS
/*
 * Lock state, manipulated as a single atomic 64-bit word (see the
 * atomic64_set() of .counter in __six_lock_init()).
 * NOTE(review): this view is fragmentary — the raw counter member and any
 * other fields of the union are not visible here; layout is incomplete.
 */
68 union six_lock_state {
78 /* for waitlist_bitnr() */
83 unsigned read_lock:26; /* number of read holds currently outstanding */
84 unsigned intent_lock:3; /* presumably the intent hold count — confirm against six.c */
87 * seq works much like in seqlocks: it's incremented every time
88 * we lock and unlock for write.
90 * If it's odd write lock is held, even unlocked.
92 * Thus readers can unlock, and then lock again later iff it
93 * hasn't been modified in the meantime.
/* Matches the 3-bit intent_lock field above: (1 << 3) - 1 == 7 */
99 #define SIX_LOCK_MAX_RECURSE ((1 << 3) - 1)
/*
 * Fields of struct six_lock (the struct's opening line is not visible in
 * this view):
 */
108 union six_lock_state state; /* packed read/intent/seq state word */
109 struct task_struct *owner; /* current intent/write holder, for optimistic spinning — TODO confirm */
110 struct optimistic_spin_queue osq; /* MCS-style queue for spinning waiters */
112 raw_spinlock_t wait_lock; /* protects wait_list[] below */
113 struct list_head wait_list[2]; /* indexed by SIX_LOCK_read / SIX_LOCK_intent (see init below) */
114 #ifdef CONFIG_DEBUG_LOCK_ALLOC
115 struct lockdep_map dep_map; /* lockdep tracking, debug builds only */
/*
 * Initialize a six lock: zero the atomic state word, initialize the
 * waitlist spinlock and the per-type (read/intent) wait lists, and
 * register the lock with lockdep when CONFIG_DEBUG_LOCK_ALLOC is set.
 *
 * Callers normally go through the six_lock_init() macro, which supplies
 * the lock_class_key and a stringified name.
 * NOTE(review): the parameter line carrying the lockdep name (used by
 * lockdep_init_map() below) is not visible in this view.
 */
119 static __always_inline void __six_lock_init(struct six_lock *lock,
121 struct lock_class_key *key)
123 atomic64_set(&lock->state.counter, 0); /* fully unlocked: no readers, no intent, even seq */
124 raw_spin_lock_init(&lock->wait_lock);
125 INIT_LIST_HEAD(&lock->wait_list[SIX_LOCK_read]);
126 INIT_LIST_HEAD(&lock->wait_list[SIX_LOCK_intent]);
127 #ifdef CONFIG_DEBUG_LOCK_ALLOC
128 debug_check_no_locks_freed((void *) lock, sizeof(*lock));
129 lockdep_init_map(&lock->dep_map, name, key, 0);
/*
 * six_lock_init(): per-call-site wrapper around __six_lock_init().
 * Creates a static lock_class_key (one lockdep class per init site) and
 * stringifies the lock expression for the lockdep name.
 *
 * __SIX_VAL(field, v) below builds a six_lock_state word with a single
 * field set, yielding the raw .v value (for atomic add/sub on the state
 * counter).
 */
133 #define six_lock_init(lock) \
135 static struct lock_class_key __key; \
137 __six_lock_init((lock), #lock, &__key); \
140 #define __SIX_VAL(field, _v) (((union six_lock_state) { .field = _v }).v)
142 #ifdef SIX_LOCK_SEPARATE_LOCKFNS
/*
 * With separate per-type lock functions: __SIX_LOCK(type) expands to
 * declarations of the four out-of-line entry points
 * six_{trylock,relock,lock,unlock}_<type>(), instantiated for each of
 * read/intent/write.
 */
144 #define __SIX_LOCK(type) \
145 bool six_trylock_##type(struct six_lock *); \
146 bool six_relock_##type(struct six_lock *, u32); \
147 void six_lock_##type(struct six_lock *); \
148 void six_unlock_##type(struct six_lock *);
/*
 * Dispatch a runtime enum six_lock_type to the matching per-type function
 * fn##_{read,intent,write}().
 * NOTE(review): the enclosing switch statement's opening/closing lines
 * (and any default/BUG() arm) are not visible in this view.
 */
155 #define SIX_LOCK_DISPATCH(type, fn, ...) \
157 case SIX_LOCK_read: \
158 return fn##_read(__VA_ARGS__); \
159 case SIX_LOCK_intent: \
160 return fn##_intent(__VA_ARGS__); \
161 case SIX_LOCK_write: \
162 return fn##_write(__VA_ARGS__); \
/* Non-blocking acquire; dispatches to six_trylock_{read,intent,write}(). */
167 static inline bool six_trylock_type(struct six_lock *lock, enum six_lock_type type)
169 SIX_LOCK_DISPATCH(type, six_trylock, lock);
/*
 * Retake a previously-held lock iff the sequence number still matches
 * (i.e. no write lock taken in between); dispatches to
 * six_relock_{read,intent,write}().
 */
172 static inline bool six_relock_type(struct six_lock *lock, enum six_lock_type type,
175 SIX_LOCK_DISPATCH(type, six_relock, lock, seq);
/* Blocking acquire; dispatches to six_lock_{read,intent,write}(). */
178 static inline void six_lock_type(struct six_lock *lock, enum six_lock_type type)
180 SIX_LOCK_DISPATCH(type, six_lock, lock);
/* Release one hold; dispatches to six_unlock_{read,intent,write}(). */
183 static inline void six_unlock_type(struct six_lock *lock, enum six_lock_type type)
185 SIX_LOCK_DISPATCH(type, six_unlock, lock);
/*
 * !SIX_LOCK_SEPARATE_LOCKFNS: the type-taking functions are themselves
 * the out-of-line entry points (defined in six.c), and the per-type
 * six_*_<type>() names become inline wrappers around them (see
 * __SIX_LOCK below).
 */
190 bool six_trylock_type(struct six_lock *, enum six_lock_type);
191 bool six_relock_type(struct six_lock *, enum six_lock_type, unsigned);
192 void six_lock_type(struct six_lock *, enum six_lock_type);
193 void six_unlock_type(struct six_lock *, enum six_lock_type);
/*
 * In this configuration __SIX_LOCK(type) expands to always-inline
 * per-type wrappers forwarding to the type-taking functions above, so
 * callers can use the same six_*_<type>() names in either configuration.
 */
195 #define __SIX_LOCK(type) \
196 static __always_inline bool six_trylock_##type(struct six_lock *lock) \
198 return six_trylock_type(lock, SIX_LOCK_##type); \
201 static __always_inline bool six_relock_##type(struct six_lock *lock, u32 seq)\
203 return six_relock_type(lock, SIX_LOCK_##type, seq); \
206 static __always_inline void six_lock_##type(struct six_lock *lock) \
208 six_lock_type(lock, SIX_LOCK_##type); \
211 static __always_inline void six_unlock_##type(struct six_lock *lock) \
213 six_unlock_type(lock, SIX_LOCK_##type); \
/* Convert a held intent lock to a read lock (see header comment above). */
223 void six_lock_downgrade(struct six_lock *);
/* Attempt to convert a held read lock to intent; returns false on failure. */
224 bool six_lock_tryupgrade(struct six_lock *);
/* Presumably converts a hold of the first type to the second without
 * blocking — TODO confirm semantics against six.c. */
225 bool six_trylock_convert(struct six_lock *, enum six_lock_type,
/* Bump the hold count for a type this thread already holds; one extra
 * unlock will then be required (see header comment above). */
228 void six_lock_increment(struct six_lock *, enum six_lock_type);
230 #endif /* _BCACHEFS_SIX_H */