// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#ifdef DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)		do {} while (0)
#endif

#define six_acquire(l, t, r)	lock_acquire(l, 0, t, r, 1, NULL, _RET_IP_)
#define six_release(l)		lock_release(l, _RET_IP_)

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

struct six_lock_vals {
	/* Value we add to the lock in order to take the lock: */
	u64			lock_val;

	/* If the lock has this value (used as a mask), taking the lock fails: */
	u64			lock_fail;

	/* Value we add to the lock in order to release the lock: */
	u64			unlock_val;

	/* Mask that indicates lock is held for this type: */
	u64			held_mask;

	/* Waitlist we wakeup when releasing the lock: */
	enum six_lock_type	unlock_wakeup;
};

#define __SIX_LOCK_HELD_read	__SIX_VAL(read_lock, ~0)
#define __SIX_LOCK_HELD_intent	__SIX_VAL(intent_lock, ~0)
#define __SIX_LOCK_HELD_write	__SIX_VAL(seq, 1)

#define LOCK_VALS {							\
	[SIX_LOCK_read] = {						\
		.lock_val	= __SIX_VAL(read_lock, 1),		\
		.lock_fail	= __SIX_LOCK_HELD_write + __SIX_VAL(write_locking, 1),\
		.unlock_val	= -__SIX_VAL(read_lock, 1),		\
		.held_mask	= __SIX_LOCK_HELD_read,			\
		.unlock_wakeup	= SIX_LOCK_write,			\
	},								\
	[SIX_LOCK_intent] = {						\
		.lock_val	= __SIX_VAL(intent_lock, 1),		\
		.lock_fail	= __SIX_LOCK_HELD_intent,		\
		.unlock_val	= -__SIX_VAL(intent_lock, 1),		\
		.held_mask	= __SIX_LOCK_HELD_intent,		\
		.unlock_wakeup	= SIX_LOCK_intent,			\
	},								\
	[SIX_LOCK_write] = {						\
		.lock_val	= __SIX_VAL(seq, 1),			\
		.lock_fail	= __SIX_LOCK_HELD_read,			\
		.unlock_val	= __SIX_VAL(seq, 1),			\
		.held_mask	= __SIX_LOCK_HELD_write,		\
		.unlock_wakeup	= SIX_LOCK_read,			\
	},								\
}

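/*
 * In other words: a read lock fails if a write lock is held or is being taken,
 * an intent lock fails only if another intent lock is held, and a write lock
 * fails if any read locks are held. A held write lock is encoded as the low
 * bit of the sequence number being set.
 */
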
static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
				 union six_lock_state old,
				 struct task_struct *owner)
{
	if (type != SIX_LOCK_intent)
		return;

	if (!old.intent_lock) {
		EBUG_ON(lock->owner);
		lock->owner = owner;
	} else {
		EBUG_ON(lock->owner != current);
	}
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
	unsigned read_count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		read_count += *per_cpu_ptr(lock->readers, cpu);
	return read_count;
}

/* This is probably up there with the more evil things I've done */
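/*
 * waitlist_bitnr() computes the bit offset of a waitlist's wait bit within the
 * 64-bit lock state: build a state value with only that ->waiters bit set,
 * then take ilog2() of the raw value.
 */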
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))

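/*
 * Returns > 0 on success and 0 on failure; a negative return means the attempt
 * may have caused a spurious failure for waiters of another lock type, and
 * -1 - type tells the caller which waitlist to wake up.
 */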
static int __do_six_trylock_type(struct six_lock *lock,
				 enum six_lock_type type,
				 struct task_struct *task,
				 bool try)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state old, new;
	int ret;
	u64 v;

	EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
	EBUG_ON(type == SIX_LOCK_write && (lock->state.seq & 1));
	EBUG_ON(type == SIX_LOCK_write && (try != !(lock->state.write_locking)));

	/*
	 * Percpu reader mode:
	 *
	 * The basic idea behind this algorithm is that you can implement a lock
	 * between two threads without any atomics, just memory barriers:
	 *
	 * For two threads you'll need two variables, one variable for "thread a
	 * has the lock" and another for "thread b has the lock".
	 *
	 * To take the lock, a thread sets its variable indicating that it holds
	 * the lock, then issues a full memory barrier, then reads from the
	 * other thread's variable to check if the other thread thinks it has
	 * the lock. If we raced, we back off and retry/sleep.
	 */

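	/*
	 * A rough sketch of that two-thread scheme (purely illustrative;
	 * a_held/b_held are hypothetical per-thread flags, not part of this
	 * lock's state):
	 *
	 *	thread a			thread b
	 *	WRITE_ONCE(a_held, true);	WRITE_ONCE(b_held, true);
	 *	smp_mb();			smp_mb();
	 *	if (READ_ONCE(b_held))		if (READ_ONCE(a_held))
	 *		back off, retry		        back off, retry
	 *	else				else
	 *		lock acquired		        lock acquired
	 *
	 * Below, readers play the role of thread a (the lock->readers percpu
	 * counters) and a writer plays thread b (the write_locking bit).
	 */
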
	if (type == SIX_LOCK_read && lock->readers) {
		preempt_disable();
		this_cpu_inc(*lock->readers); /* signal that we own lock */

		smp_mb();

		old.v = READ_ONCE(lock->state.v);
		ret = !(old.v & l[type].lock_fail);

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * If we failed because a writer was trying to take the
		 * lock, issue a wakeup because we might have caused a
		 * spurious trylock failure:
		 */
		if (old.write_locking)
			ret = -1 - SIX_LOCK_write;
	} else if (type == SIX_LOCK_write && lock->readers) {
		if (try) {
			atomic64_add(__SIX_VAL(write_locking, 1),
				     &lock->state.counter);
			smp_mb__after_atomic();
		}

		ret = !pcpu_read_count(lock);

		/*
		 * On success, we increment lock->seq; also we clear
		 * write_locking unless we failed from the lock path:
		 */
		v = 0;
		if (ret)
			v += __SIX_VAL(seq, 1);
		if (ret || try)
			v -= __SIX_VAL(write_locking, 1);
		if (!ret && !try && !(lock->state.waiters & (1 << SIX_LOCK_write)))
			v += __SIX_VAL(waiters, 1 << SIX_LOCK_write);

		if (try && !ret) {
			old.v = atomic64_add_return(v, &lock->state.counter);
			if (old.waiters & (1 << SIX_LOCK_read))
				ret = -1 - SIX_LOCK_read;
		} else {
			atomic64_add(v, &lock->state.counter);
		}
	} else {
		v = READ_ONCE(lock->state.v);
		do {
			new.v = old.v = v;

			if (!(old.v & l[type].lock_fail)) {
				new.v += l[type].lock_val;

				if (type == SIX_LOCK_write)
					new.write_locking = 0;
			} else if (!try && !(new.waiters & (1 << type)))
				new.waiters |= 1 << type;
			else
				break; /* waiting bit already set */
		} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
						       old.v, new.v)) != old.v);

		ret = !(old.v & l[type].lock_fail);

		EBUG_ON(ret && !(lock->state.v & l[type].held_mask));
	}

	if (ret > 0)
		six_set_owner(lock, type, old, task);

	EBUG_ON(type == SIX_LOCK_write && (try || ret > 0) && (lock->state.write_locking));

	return ret;
}

static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
{
	struct six_lock_waiter *w, *next;
	struct task_struct *task;
	bool saw_one;
	int ret;
again:
	ret = 0;
	saw_one = false;
	raw_spin_lock(&lock->wait_lock);

	list_for_each_entry_safe(w, next, &lock->wait_list, list) {
		if (w->lock_want != lock_type)
			continue;

		if (saw_one && lock_type != SIX_LOCK_read)
			goto unlock;
		saw_one = true;

		ret = __do_six_trylock_type(lock, lock_type, w->task, false);
		if (ret <= 0)
			goto unlock;

		__list_del(w->list.prev, w->list.next);
		task = w->task;
		/*
		 * Do no writes to @w besides setting lock_acquired - otherwise
		 * we would need a memory barrier:
		 */
		barrier();
		w->lock_acquired = true;
		wake_up_process(task);
	}

	clear_bit(waitlist_bitnr(lock_type), (unsigned long *) &lock->state.v);
unlock:
	raw_spin_unlock(&lock->wait_lock);

	if (ret < 0) {
		lock_type = -ret - 1;
		goto again;
	}
}

static inline void six_lock_wakeup(struct six_lock *lock,
				   union six_lock_state state,
				   enum six_lock_type lock_type)
{
	if (lock_type == SIX_LOCK_write && state.read_lock)
		return;

	if (!(state.waiters & (1 << lock_type)))
		return;

	__six_lock_wakeup(lock, lock_type);
}

static bool do_six_trylock_type(struct six_lock *lock,
				enum six_lock_type type,
				bool try)
{
	int ret = __do_six_trylock_type(lock, type, current, try);

	if (ret < 0)
		__six_lock_wakeup(lock, -ret - 1);

	return ret > 0;
}

__always_inline __flatten
static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
	if (!do_six_trylock_type(lock, type, true))
		return false;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
	return true;
}

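/*
 * Relock: retake a lock that was previously held, identified by the sequence
 * number recorded when it was last held; fails if lock->state.seq no longer
 * matches @seq, i.e. if the lock has since been write locked.
 */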
__always_inline __flatten
static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
			      unsigned seq)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state old;
	u64 v;

	EBUG_ON(type == SIX_LOCK_write);

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		bool ret;

		preempt_disable();
		this_cpu_inc(*lock->readers);
		smp_mb();

		old.v = READ_ONCE(lock->state.v);
		ret = !(old.v & l[type].lock_fail) && old.seq == seq;

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * Similar to the lock path, we may have caused a spurious write
		 * lock fail and need to issue a wakeup:
		 */
		if (old.write_locking)
			six_lock_wakeup(lock, old, SIX_LOCK_write);

		if (ret)
			six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
		return ret;
	}

	v = READ_ONCE(lock->state.v);
	do {
		old.v = v;

		if (old.seq != seq || old.v & l[type].lock_fail)
			return false;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
				old.v,
				old.v + l[type].lock_val)) != old.v);

	six_set_owner(lock, type, old, current);
	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
	return true;
}

#ifdef CONFIG_LOCK_SPIN_ON_OWNER

static inline bool six_optimistic_spin(struct six_lock *lock,
				       struct six_lock_waiter *wait)
{
	struct task_struct *owner, *task = current;

	switch (wait->lock_want) {
	case SIX_LOCK_read:
		break;
	case SIX_LOCK_intent:
		if (lock->wait_list.next != &wait->list)
			return false;
		break;
	case SIX_LOCK_write:
		return false;
	}

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);

	while (owner && lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Stop spinning once we've been handed the lock, if the owner
		 * isn't running, or if we're an RT task that would live-lock
		 * because we'd never let the owner complete:
		 */
		if (wait->lock_acquired ||
		    !owner->on_cpu ||
		    rt_task(task) ||
		    need_resched())
			break;

		cpu_relax();
	}
	rcu_read_unlock();

	return wait->lock_acquired;
}

#else /* CONFIG_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock,
				       struct six_lock_waiter *wait)
{
	return false;
}

#endif

static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
				    struct six_lock_waiter *wait,
				    six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	union six_lock_state old;
	int ret = 0;

	if (type == SIX_LOCK_write) {
		EBUG_ON(lock->state.write_locking);
		atomic64_add(__SIX_VAL(write_locking, 1), &lock->state.counter);
		smp_mb__after_atomic();
	}

	lock_contended(&lock->dep_map, _RET_IP_);

	wait->task		= current;
	wait->lock_want		= type;
	wait->lock_acquired	= false;

	raw_spin_lock(&lock->wait_lock);
	if (!(lock->state.waiters & (1 << type)))
		set_bit(waitlist_bitnr(type), (unsigned long *) &lock->state.v);
	/*
	 * Retry taking the lock after taking the waitlist lock; we may have
	 * raced with an unlock:
	 */
	ret = __do_six_trylock_type(lock, type, current, false);
	if (ret <= 0) {
		wait->start_time = local_clock();

		if (!list_empty(&lock->wait_list)) {
			struct six_lock_waiter *last =
				list_last_entry(&lock->wait_list,
					struct six_lock_waiter, list);

			if (time_before_eq64(wait->start_time, last->start_time))
				wait->start_time = last->start_time + 1;
		}

		list_add_tail(&wait->list, &lock->wait_list);
	}
	raw_spin_unlock(&lock->wait_lock);

	if (unlikely(ret > 0)) {
		ret = 0;
		goto out;
	}

	if (unlikely(ret < 0)) {
		__six_lock_wakeup(lock, -ret - 1);
		ret = 0;
	}

	if (six_optimistic_spin(lock, wait))
		goto out;

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (wait->lock_acquired)
			break;

		ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
		if (unlikely(ret)) {
			raw_spin_lock(&lock->wait_lock);
			if (!wait->lock_acquired)
				list_del(&wait->list);
			raw_spin_unlock(&lock->wait_lock);

			if (wait->lock_acquired)
				do_six_unlock_type(lock, type);
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
out:
	if (ret && type == SIX_LOCK_write && lock->state.write_locking) {
		old.v = atomic64_sub_return(__SIX_VAL(write_locking, 1),
					    &lock->state.counter);
		six_lock_wakeup(lock, old, SIX_LOCK_read);
	}

	return ret;
}

__always_inline __flatten
static int __six_lock_type_waiter(struct six_lock *lock, enum six_lock_type type,
				  struct six_lock_waiter *wait,
				  six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	int ret;

	wait->start_time = 0;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

	ret = do_six_trylock_type(lock, type, true) ? 0
		: __six_lock_type_slowpath(lock, type, wait, should_sleep_fn, p);

	if (ret && type != SIX_LOCK_write)
		six_release(&lock->dep_map);
	if (!ret)
		lock_acquired(&lock->dep_map, _RET_IP_);

	return ret;
}

static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
			   six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	struct six_lock_waiter wait;

	return __six_lock_type_waiter(lock, type, &wait, should_sleep_fn, p);
}

__always_inline __flatten
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state state;

	if (type == SIX_LOCK_intent)
		lock->owner = NULL;

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		smp_mb(); /* unlock barrier */
		this_cpu_dec(*lock->readers);
		smp_mb(); /* between unlocking and checking for waiters */
		state.v = READ_ONCE(lock->state.v);
	} else {
		EBUG_ON(!(lock->state.v & l[type].held_mask));
		state.v = atomic64_add_return_release(l[type].unlock_val,
						      &lock->state.counter);
	}

	six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}

__always_inline __flatten
static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	EBUG_ON(type == SIX_LOCK_write &&
		!(lock->state.v & __SIX_LOCK_HELD_intent));
	EBUG_ON((type == SIX_LOCK_write ||
		 type == SIX_LOCK_intent) &&
		lock->owner != current);

	if (type != SIX_LOCK_write)
		six_release(&lock->dep_map);

	if (type == SIX_LOCK_intent &&
	    lock->intent_lock_recurse) {
		--lock->intent_lock_recurse;
		return;
	}

	do_six_unlock_type(lock, type);
}

#define __SIX_LOCK(type)						\
bool six_trylock_##type(struct six_lock *lock)				\
{									\
	return __six_trylock_type(lock, SIX_LOCK_##type);		\
}									\
EXPORT_SYMBOL_GPL(six_trylock_##type);					\
									\
bool six_relock_##type(struct six_lock *lock, u32 seq)			\
{									\
	return __six_relock_type(lock, SIX_LOCK_##type, seq);		\
}									\
EXPORT_SYMBOL_GPL(six_relock_##type);					\
									\
int six_lock_##type(struct six_lock *lock,				\
		    six_lock_should_sleep_fn should_sleep_fn, void *p)	\
{									\
	return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
}									\
EXPORT_SYMBOL_GPL(six_lock_##type);					\
									\
int six_lock_waiter_##type(struct six_lock *lock,			\
			   struct six_lock_waiter *wait,		\
			   six_lock_should_sleep_fn should_sleep_fn, void *p)\
{									\
	return __six_lock_type_waiter(lock, SIX_LOCK_##type, wait, should_sleep_fn, p);\
}									\
EXPORT_SYMBOL_GPL(six_lock_waiter_##type);				\
									\
void six_unlock_##type(struct six_lock *lock)				\
{									\
	__six_unlock_type(lock, SIX_LOCK_##type);			\
}									\
EXPORT_SYMBOL_GPL(six_unlock_##type);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)

#undef __SIX_LOCK

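/*
 * Example usage of the generated API (a sketch; foo_lock is a hypothetical
 * lock, six_lock_init() comes from the six.h header, and the NULL
 * should_sleep_fn/p arguments simply mean "block until acquired"):
 *
 *	struct six_lock foo_lock;
 *
 *	six_lock_init(&foo_lock);
 *
 *	six_lock_read(&foo_lock, NULL, NULL);
 *	...
 *	six_unlock_read(&foo_lock);
 *
 *	if (six_trylock_intent(&foo_lock)) {
 *		...
 *		six_unlock_intent(&foo_lock);
 *	}
 */
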
/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
	six_lock_increment(lock, SIX_LOCK_read);
	six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);

bool six_lock_tryupgrade(struct six_lock *lock)
{
	union six_lock_state old, new;
	u64 v = READ_ONCE(lock->state.v);

	do {
		new.v = old.v = v;

		if (new.intent_lock)
			return false;

		if (!lock->readers) {
			EBUG_ON(!new.read_lock);
			new.read_lock--;
		}

		new.intent_lock = 1;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
				old.v, new.v)) != old.v);

	if (lock->readers)
		this_cpu_dec(*lock->readers);

	six_set_owner(lock, SIX_LOCK_intent, old, current);

	return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
			 enum six_lock_type from,
			 enum six_lock_type to)
{
	EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

	if (to == from)
		return true;

	if (to == SIX_LOCK_read) {
		six_lock_downgrade(lock);
		return true;
	} else {
		return six_lock_tryupgrade(lock);
	}
}
EXPORT_SYMBOL_GPL(six_trylock_convert);

/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;

	six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

	/* XXX: assert already locked, and that we don't overflow: */
	switch (type) {
	case SIX_LOCK_read:
		if (lock->readers) {
			this_cpu_inc(*lock->readers);
		} else {
			EBUG_ON(!lock->state.read_lock &&
				!lock->state.intent_lock);
			atomic64_add(l[type].lock_val, &lock->state.counter);
		}
		break;
	case SIX_LOCK_intent:
		EBUG_ON(!lock->state.intent_lock);
		lock->intent_lock_recurse++;
		break;
	case SIX_LOCK_write:
		BUG();
		break;
	}
}
EXPORT_SYMBOL_GPL(six_lock_increment);

void six_lock_wakeup_all(struct six_lock *lock)
{
	union six_lock_state state = lock->state;
	struct six_lock_waiter *w;

	six_lock_wakeup(lock, state, SIX_LOCK_read);
	six_lock_wakeup(lock, state, SIX_LOCK_intent);
	six_lock_wakeup(lock, state, SIX_LOCK_write);

	raw_spin_lock(&lock->wait_lock);
	list_for_each_entry(w, &lock->wait_list, list)
		wake_up_process(w->task);
	raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

void six_lock_pcpu_free(struct six_lock *lock)
{
	BUG_ON(lock->readers && pcpu_read_count(lock));
	BUG_ON(lock->state.read_lock);

	free_percpu(lock->readers);
	lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free);

void six_lock_pcpu_alloc(struct six_lock *lock)
{
	if (!lock->readers)
		lock->readers = alloc_percpu(unsigned);
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);

/*
 * Returns lock held counts, for both read and intent
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
{
	struct six_lock_count ret;

	ret.n[SIX_LOCK_read]	= 0;
	ret.n[SIX_LOCK_intent]	= lock->state.intent_lock + lock->intent_lock_recurse;
	ret.n[SIX_LOCK_write]	= lock->state.seq & 1;

	if (!lock->readers)
		ret.n[SIX_LOCK_read] += lock->state.read_lock;
	else {
		int cpu;

		for_each_possible_cpu(cpu)
			ret.n[SIX_LOCK_read] += *per_cpu_ptr(lock->readers, cpu);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(six_lock_counts);