1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/export.h>
4 #include <linux/log2.h>
5 #include <linux/percpu.h>
6 #include <linux/preempt.h>
7 #include <linux/rcupdate.h>
8 #include <linux/sched.h>
9 #include <linux/sched/clock.h>
10 #include <linux/sched/rt.h>
11 #include <linux/six.h>
12 #include <linux/slab.h>
14 #include <trace/events/lock.h>
17 #define EBUG_ON(cond) BUG_ON(cond)
19 #define EBUG_ON(cond) do {} while (0)
22 #define six_acquire(l, t, r, ip) lock_acquire(l, 0, t, r, 1, NULL, ip)
23 #define six_release(l, ip) lock_release(l, ip)
25 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
27 #define SIX_LOCK_HELD_read_OFFSET 0
28 #define SIX_LOCK_HELD_read ~(~0U << 26)
29 #define SIX_LOCK_HELD_intent (1U << 26)
30 #define SIX_LOCK_HELD_write (1U << 27)
31 #define SIX_LOCK_WAITING_read (1U << (28 + SIX_LOCK_read))
32 #define SIX_LOCK_WAITING_intent (1U << (28 + SIX_LOCK_intent))
33 #define SIX_LOCK_WAITING_write (1U << (28 + SIX_LOCK_write))
34 #define SIX_LOCK_NOSPIN (1U << 31)
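/*
 * The masks above give the following state word layout:
 *
 *	bits  0-25	read lock count
 *	bit  26		held for intent
 *	bit  27		held for write
 *	bits 28-30	a waiter of the given type is present (read, intent, write)
 *	bit  31		NOSPIN: stop optimistic spinning
 */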
36 struct six_lock_vals {
37 /* Value we add to the lock in order to take the lock: */
40 /* If the lock has this value (used as a mask), taking the lock fails: */
43 /* Mask that indicates lock is held for this type: */
46 /* Waitlist we wakeup when releasing the lock: */
47 enum six_lock_type unlock_wakeup;
52 .lock_val = 1U << SIX_LOCK_HELD_read_OFFSET, \
53 .lock_fail = SIX_LOCK_HELD_write, \
54 .held_mask = SIX_LOCK_HELD_read, \
55 .unlock_wakeup = SIX_LOCK_write, \
57 [SIX_LOCK_intent] = { \
58 .lock_val = SIX_LOCK_HELD_intent, \
59 .lock_fail = SIX_LOCK_HELD_intent, \
60 .held_mask = SIX_LOCK_HELD_intent, \
61 .unlock_wakeup = SIX_LOCK_intent, \
63 [SIX_LOCK_write] = { \
64 .lock_val = SIX_LOCK_HELD_write, \
65 .lock_fail = SIX_LOCK_HELD_read, \
66 .held_mask = SIX_LOCK_HELD_write, \
67 .unlock_wakeup = SIX_LOCK_read, \
71 static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
73 if ((atomic_read(&lock->state) & mask) != mask)
74 atomic_or(mask, &lock->state);
77 static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
79 if (atomic_read(&lock->state) & mask)
80 atomic_and(~mask, &lock->state);
83 static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
84 u32 old, struct task_struct *owner)
86 if (type != SIX_LOCK_intent)
89 if (!(old & SIX_LOCK_HELD_intent)) {
93 EBUG_ON(lock->owner != current);
97 static inline unsigned pcpu_read_count(struct six_lock *lock)
99 unsigned read_count = 0;
102 for_each_possible_cpu(cpu)
103 read_count += *per_cpu_ptr(lock->readers, cpu);
108 * __do_six_trylock() - main trylock routine
110 * Returns 1 on success, 0 on failure
112 * In percpu reader mode, a failed trylock may cause a spurious trylock failure
113 * for another thread taking the competing lock type, and we may have to do a
114 * wakeup: when a wakeup is required, we return -1 - wakeup_type.
116 static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
117 struct task_struct *task, bool try)
119 const struct six_lock_vals l[] = LOCK_VALS;
123 EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
124 EBUG_ON(type == SIX_LOCK_write &&
125 (try != !(atomic_read(&lock->state) & SIX_LOCK_HELD_write)));
128 * Percpu reader mode:
130 * The basic idea behind this algorithm is that you can implement a lock
131 * between two threads without any atomics, just memory barriers:
133 * For two threads you'll need two variables, one variable for "thread a
134 * has the lock" and another for "thread b has the lock".
136 * To take the lock, a thread sets its variable indicating that it holds
137 * the lock, then issues a full memory barrier, then reads from the
138 * other thread's variable to check if the other thread thinks it has
139 * the lock. If we raced, we backoff and retry/sleep.
141 * Failure to take the lock may cause a spurious trylock failure in
142 * another thread, because we temporarily set the lock to indicate that
143 * we held it. This would be a problem for a thread in six_lock(), when
144 * they are calling trylock after adding themselves to the waitlist and
147 * Therefore, if we fail to get the lock, and there were waiters of the
148 * type we conflict with, we will have to issue a wakeup.
150 * Since we may be called under wait_lock (and by the wakeup code
151 * itself), we return that the wakeup has to be done instead of doing it
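 *
 * A minimal sketch of the two-thread idea described above (illustrative
 * only; "a_held" and "b_held" are hypothetical per-thread flags, not
 * fields of struct six_lock):
 *
 *	thread a:			thread b:
 *	  a_held = true;		  b_held = true;
 *	  smp_mb();			  smp_mb();
 *	  if (READ_ONCE(b_held))	  if (READ_ONCE(a_held))
 *		back off, retry/sleep;		back off, retry/sleep;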
154 if (type == SIX_LOCK_read && lock->readers) {
156 this_cpu_inc(*lock->readers); /* signal that we own lock */
160 old = atomic_read(&lock->state);
161 ret = !(old & l[type].lock_fail);
163 this_cpu_sub(*lock->readers, !ret);
166 if (!ret && (old & SIX_LOCK_WAITING_write))
167 ret = -1 - SIX_LOCK_write;
168 } else if (type == SIX_LOCK_write && lock->readers) {
170 atomic_add(SIX_LOCK_HELD_write, &lock->state);
171 smp_mb__after_atomic();
174 ret = !pcpu_read_count(lock);
177 old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state);
178 if (old & SIX_LOCK_WAITING_read)
179 ret = -1 - SIX_LOCK_read;
182 old = atomic_read(&lock->state);
184 ret = !(old & l[type].lock_fail);
185 if (!ret || (type == SIX_LOCK_write && !try)) {
189 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, old + l[type].lock_val));
191 EBUG_ON(ret && !(atomic_read(&lock->state) & l[type].held_mask));
195 six_set_owner(lock, type, old, task);
197 EBUG_ON(type == SIX_LOCK_write && try && ret <= 0 &&
198 (atomic_read(&lock->state) & SIX_LOCK_HELD_write));
203 static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
205 struct six_lock_waiter *w, *next;
206 struct task_struct *task;
212 raw_spin_lock(&lock->wait_lock);
214 list_for_each_entry_safe(w, next, &lock->wait_list, list) {
215 if (w->lock_want != lock_type)
218 if (saw_one && lock_type != SIX_LOCK_read)
222 ret = __do_six_trylock(lock, lock_type, w->task, false);
226 __list_del(w->list.prev, w->list.next);
229 * Do no writes to @w besides setting lock_acquired - otherwise
230 * we would need a memory barrier:
233 w->lock_acquired = true;
234 wake_up_process(task);
237 six_clear_bitmask(lock, SIX_LOCK_WAITING_read << lock_type);
239 raw_spin_unlock(&lock->wait_lock);
242 lock_type = -ret - 1;
248 static void six_lock_wakeup(struct six_lock *lock, u32 state,
249 enum six_lock_type lock_type)
251 if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read))
254 if (!(state & (SIX_LOCK_WAITING_read << lock_type)))
257 __six_lock_wakeup(lock, lock_type);
261 static bool do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try)
265 ret = __do_six_trylock(lock, type, current, try);
267 __six_lock_wakeup(lock, -ret - 1);
273 * six_trylock_ip - attempt to take a six lock without blocking
274 * @lock: lock to take
275 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
276 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
278 * Return: true on success, false on failure.
280 bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
282 if (!do_six_trylock(lock, type, true))
285 if (type != SIX_LOCK_write)
286 six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
289 EXPORT_SYMBOL_GPL(six_trylock_ip);
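/*
 * Illustrative sketch of six_trylock_ip() usage ("foo" and do_update() are
 * hypothetical caller-side names, not part of this file):
 *
 *	if (six_trylock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_)) {
 *		do_update(foo);
 *		six_unlock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_);
 *	}
 */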
292 * six_relock_ip - attempt to re-take a lock that was held previously
293 * @lock: lock to take
294 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
295 * @seq: lock sequence number obtained from six_lock_seq() while lock was
297 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
299 * Return: true on success, false on failure.
301 bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
302 unsigned seq, unsigned long ip)
304 if (lock->seq != seq || !six_trylock_ip(lock, type, ip))
307 if (lock->seq != seq) {
308 six_unlock_ip(lock, type, ip);
314 EXPORT_SYMBOL_GPL(six_relock_ip);
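/*
 * Typical relock pattern, as a sketch ("foo" is hypothetical): record the
 * sequence number while the lock is held, drop the lock, then later try to
 * re-take it, falling back to a fresh lock and revalidation on failure:
 *
 *	unsigned seq = six_lock_seq(&foo->lock);
 *
 *	six_unlock_ip(&foo->lock, SIX_LOCK_read, _THIS_IP_);
 *	...
 *	if (!six_relock_ip(&foo->lock, SIX_LOCK_read, seq, _THIS_IP_))
 *		... take the lock from scratch and revalidate ...
 */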
316 #ifdef CONFIG_LOCK_SPIN_ON_OWNER
318 static inline bool six_can_spin_on_owner(struct six_lock *lock)
320 struct task_struct *owner;
327 owner = READ_ONCE(lock->owner);
328 ret = !owner || owner_on_cpu(owner);
334 static inline bool six_spin_on_owner(struct six_lock *lock,
335 struct task_struct *owner,
342 while (lock->owner == owner) {
344 * Ensure we emit the owner->on_cpu, dereference _after_
345 * checking lock->owner still matches owner. If that fails,
346 * owner might point to freed memory. If it still matches,
347 * the rcu_read_lock() ensures the memory stays valid.
351 if (!owner_on_cpu(owner) || need_resched()) {
356 if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
357 six_set_bitmask(lock, SIX_LOCK_NOSPIN);
369 static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
371 struct task_struct *task = current;
374 if (type == SIX_LOCK_write)
378 if (!six_can_spin_on_owner(lock))
381 if (!osq_lock(&lock->osq))
384 end_time = sched_clock() + 10 * NSEC_PER_USEC;
387 struct task_struct *owner;
390 * If there's an owner, wait for it to either
391 * release the lock or go to sleep.
393 owner = READ_ONCE(lock->owner);
394 if (owner && !six_spin_on_owner(lock, owner, end_time))
397 if (do_six_trylock(lock, type, false)) {
398 osq_unlock(&lock->osq);
404 * When there's no owner, we might have preempted between the
405 * owner acquiring the lock and setting the owner field. If
406 * we're an RT task that will live-lock because we won't let
407 * the owner complete.
409 if (!owner && (need_resched() || rt_task(task)))
413 * The cpu_relax() call is a compiler barrier which forces
414 * everything in this loop to be re-loaded. We don't need
415 * memory barriers as we'll eventually observe the right
416 * values at the cost of a few extra spins.
421 osq_unlock(&lock->osq);
426 * If we fell out of the spin path because of need_resched(),
427 * reschedule now, before we try-lock again. This avoids getting
428 * scheduled out right after we obtained the lock.
436 #else /* CONFIG_LOCK_SPIN_ON_OWNER */
438 static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
446 static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
447 struct six_lock_waiter *wait,
448 six_lock_should_sleep_fn should_sleep_fn, void *p,
453 if (type == SIX_LOCK_write) {
454 EBUG_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
455 atomic_add(SIX_LOCK_HELD_write, &lock->state);
456 smp_mb__after_atomic();
459 trace_contention_begin(lock, 0);
460 lock_contended(&lock->dep_map, ip);
462 if (six_optimistic_spin(lock, type))
465 wait->task = current;
466 wait->lock_want = type;
467 wait->lock_acquired = false;
469 raw_spin_lock(&lock->wait_lock);
470 six_set_bitmask(lock, SIX_LOCK_WAITING_read << type);
472 * Retry taking the lock after taking waitlist lock, in case we raced
475 ret = __do_six_trylock(lock, type, current, false);
477 wait->start_time = local_clock();
479 if (!list_empty(&lock->wait_list)) {
480 struct six_lock_waiter *last =
481 list_last_entry(&lock->wait_list,
482 struct six_lock_waiter, list);
484 if (time_before_eq64(wait->start_time, last->start_time))
485 wait->start_time = last->start_time + 1;
488 list_add_tail(&wait->list, &lock->wait_list);
490 raw_spin_unlock(&lock->wait_lock);
492 if (unlikely(ret > 0)) {
497 if (unlikely(ret < 0)) {
498 __six_lock_wakeup(lock, -ret - 1);
503 set_current_state(TASK_UNINTERRUPTIBLE);
505 if (wait->lock_acquired)
508 ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
510 raw_spin_lock(&lock->wait_lock);
511 if (!wait->lock_acquired)
512 list_del(&wait->list);
513 raw_spin_unlock(&lock->wait_lock);
515 if (unlikely(wait->lock_acquired))
516 do_six_unlock_type(lock, type);
523 __set_current_state(TASK_RUNNING);
525 if (ret && type == SIX_LOCK_write) {
526 six_clear_bitmask(lock, SIX_LOCK_HELD_write);
527 six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
529 trace_contention_end(lock, 0);
535 * six_lock_ip_waiter - take a lock, with full waitlist interface
536 * @lock: lock to take
537 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
538 * @wait: pointer to wait object, which will be added to lock's waitlist
539 * @should_sleep_fn: callback run after adding to waitlist, immediately prior
541 * @p: passed through to @should_sleep_fn
542 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
544 * This is the most general six_lock() variant, with parameters to support full
545 * cycle detection for deadlock avoidance.
547 * The code calling this function must implement tracking of held locks, and the
548 * @wait object should be embedded into the struct that tracks held locks -
549 * which must also be accessible in a thread-safe way.
551 * @should_sleep_fn should invoke the cycle detector; it should walk each
552 * lock's waiters, and for each waiter recursively walk their held locks.
554 * When this function must block, @wait will be added to @lock's waitlist before
555 * calling trylock, and before calling @should_sleep_fn, and @wait will not be
556 * removed from the lock waitlist until the lock has been successfully acquired,
559 * @wait.start_time will be monotonically increasing for any given waitlist, and
560 * thus may be used as a loop cursor.
562 * Return: 0 on success, or the return code from @should_sleep_fn on failure.
564 int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
565 struct six_lock_waiter *wait,
566 six_lock_should_sleep_fn should_sleep_fn, void *p,
571 wait->start_time = 0;
573 if (type != SIX_LOCK_write)
574 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip);
576 ret = do_six_trylock(lock, type, true) ? 0
577 : six_lock_slowpath(lock, type, wait, should_sleep_fn, p, ip);
579 if (ret && type != SIX_LOCK_write)
580 six_release(&lock->dep_map, ip);
582 lock_acquired(&lock->dep_map, ip);
586 EXPORT_SYMBOL_GPL(six_lock_ip_waiter);
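/*
 * Rough shape of a caller (a sketch under stated assumptions: "trans", its
 * embedded wait object and trans_would_deadlock() are hypothetical, standing
 * in for whatever structure tracks held locks and implements the cycle
 * detector described above):
 *
 *	static int trans_should_sleep(struct six_lock *lock, void *p)
 *	{
 *		struct trans *trans = p;
 *
 *		return trans_would_deadlock(trans, lock) ? -EDEADLK : 0;
 *	}
 *
 *	ret = six_lock_ip_waiter(&foo->lock, SIX_LOCK_intent, &trans->wait,
 *				 trans_should_sleep, trans, _THIS_IP_);
 */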
589 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
591 const struct six_lock_vals l[] = LOCK_VALS;
594 if (type == SIX_LOCK_intent)
597 if (type == SIX_LOCK_read &&
599 smp_mb(); /* unlock barrier */
600 this_cpu_dec(*lock->readers);
601 smp_mb(); /* between unlocking and checking for waiters */
602 state = atomic_read(&lock->state);
604 u32 v = l[type].lock_val;
606 if (type != SIX_LOCK_read)
607 v += atomic_read(&lock->state) & SIX_LOCK_NOSPIN;
609 EBUG_ON(!(atomic_read(&lock->state) & l[type].held_mask));
610 state = atomic_sub_return_release(v, &lock->state);
613 six_lock_wakeup(lock, state, l[type].unlock_wakeup);
617 * six_unlock_ip - drop a six lock
618 * @lock: lock to unlock
619 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
620 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
622 * When a lock is held multiple times (because six_lock_increment() was used),
623 * this decrements the 'lock held' counter by one.
626 * six_lock_read(&foo->lock); read count 1
627 * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
628 * six_lock_unlock(&foo->lock, SIX_LOCK_read); read count 1
629 * six_lock_unlock(&foo->lock, SIX_LOCK_read); read count 0
631 void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
633 EBUG_ON(type == SIX_LOCK_write &&
634 !(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
635 EBUG_ON((type == SIX_LOCK_write ||
636 type == SIX_LOCK_intent) &&
637 lock->owner != current);
639 if (type != SIX_LOCK_write)
640 six_release(&lock->dep_map, ip);
642 if (type == SIX_LOCK_intent &&
643 lock->intent_lock_recurse) {
644 --lock->intent_lock_recurse;
648 lock->seq += type == SIX_LOCK_write;
650 do_six_unlock_type(lock, type);
652 EXPORT_SYMBOL_GPL(six_unlock_ip);
655 * six_lock_downgrade - convert an intent lock to a read lock
656 * @lock: lock to downgrade
658 * @lock will have read count incremented and intent count decremented
660 void six_lock_downgrade(struct six_lock *lock)
662 six_lock_increment(lock, SIX_LOCK_read);
663 six_unlock_intent(lock);
665 EXPORT_SYMBOL_GPL(six_lock_downgrade);
668 * six_lock_tryupgrade - attempt to convert read lock to an intent lock
669 * @lock: lock to upgrade
671 * On success, @lock will have intent count incremented and read count
674 * Return: true on success, false on failure
676 bool six_lock_tryupgrade(struct six_lock *lock)
678 const struct six_lock_vals l[] = LOCK_VALS;
679 u32 old = atomic_read(&lock->state), new;
684 if (new & SIX_LOCK_HELD_intent)
687 if (!lock->readers) {
688 EBUG_ON(!(new & SIX_LOCK_HELD_read));
689 new -= l[SIX_LOCK_read].lock_val;
692 new |= SIX_LOCK_HELD_intent;
693 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, new));
696 this_cpu_dec(*lock->readers);
698 six_set_owner(lock, SIX_LOCK_intent, old, current);
702 EXPORT_SYMBOL_GPL(six_lock_tryupgrade);
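/*
 * Upgrade pattern, as a sketch ("foo" is hypothetical): if the upgrade fails
 * because another thread holds the intent lock, drop the read lock, take the
 * intent lock from scratch, and revalidate:
 *
 *	if (!six_lock_tryupgrade(&foo->lock)) {
 *		six_unlock_read(&foo->lock);
 *		six_lock_intent(&foo->lock);
 *		... revalidate ...
 *	}
 */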
705 * six_trylock_convert - attempt to convert a held lock from one type to another
706 * @lock: lock to upgrade
707 * @from: SIX_LOCK_read or SIX_LOCK_intent
708 * @to: SIX_LOCK_read or SIX_LOCK_intent
710 * On success, @lock will have intent count incremented and read count
713 * Return: true on success, false on failure
715 bool six_trylock_convert(struct six_lock *lock,
716 enum six_lock_type from,
717 enum six_lock_type to)
719 EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);
724 if (to == SIX_LOCK_read) {
725 six_lock_downgrade(lock);
728 return six_lock_tryupgrade(lock);
731 EXPORT_SYMBOL_GPL(six_trylock_convert);
734 * six_lock_increment - increase held lock count on a lock that is already held
735 * @lock: lock to increment
736 * @type: SIX_LOCK_read or SIX_LOCK_intent
738 * @lock must already be held, with a lock type that is greater than or equal to
741 * A corresponding six_unlock_type() call will be required for @lock to be fully
744 void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
746 const struct six_lock_vals l[] = LOCK_VALS;
748 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);
750 /* XXX: assert already locked, and that we don't overflow: */
755 this_cpu_inc(*lock->readers);
757 EBUG_ON(!(atomic_read(&lock->state) &
759 SIX_LOCK_HELD_intent)));
760 atomic_add(l[type].lock_val, &lock->state);
763 case SIX_LOCK_intent:
764 EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
765 lock->intent_lock_recurse++;
772 EXPORT_SYMBOL_GPL(six_lock_increment);
775 * six_lock_wakeup_all - wake up all waiters on @lock
776 * @lock: lock to wake up waiters for
778 * Waking up waiters will cause them to re-run should_sleep_fn, which may then
779 * abort the lock operation.
781 * This function is never needed in a bug-free program; it's only useful in
782 * debug code, e.g. to determine if a cycle detector is at fault.
784 void six_lock_wakeup_all(struct six_lock *lock)
786 u32 state = atomic_read(&lock->state);
787 struct six_lock_waiter *w;
789 six_lock_wakeup(lock, state, SIX_LOCK_read);
790 six_lock_wakeup(lock, state, SIX_LOCK_intent);
791 six_lock_wakeup(lock, state, SIX_LOCK_write);
793 raw_spin_lock(&lock->wait_lock);
794 list_for_each_entry(w, &lock->wait_list, list)
795 wake_up_process(w->task);
796 raw_spin_unlock(&lock->wait_lock);
798 EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
801 * six_lock_counts - return held lock counts, for each lock type
802 * @lock: lock to return counters for
804 * Return: the number of times a lock is held for read, intent and write.
806 struct six_lock_count six_lock_counts(struct six_lock *lock)
808 struct six_lock_count ret;
810 ret.n[SIX_LOCK_read] = !lock->readers
811 ? atomic_read(&lock->state) & SIX_LOCK_HELD_read
812 : pcpu_read_count(lock);
813 ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) +
814 lock->intent_lock_recurse;
815 ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
819 EXPORT_SYMBOL_GPL(six_lock_counts);
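/*
 * Mainly useful for assertions and debug output, e.g. (sketch, with a
 * hypothetical "foo"):
 *
 *	BUG_ON(six_lock_counts(&foo->lock).n[SIX_LOCK_intent] != 1);
 */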
822 * six_lock_readers_add - directly manipulate reader count of a lock
823 * @lock: lock to add/subtract readers for
824 * @nr: reader count to add/subtract
826 * When an upper layer is implementing lock reentrancy, we may have both read
827 * and intent locks on the same lock.
829 * When we need to take a write lock, the read locks will cause self-deadlock,
830 * because six locks themselves do not track which read locks are held by the
831 * current thread and which are held by a different thread - they do no
832 * per-thread tracking of held locks.
834 * The upper layer that is tracking held locks may, however, if trylock() has
835 * failed, count up its own read locks, subtract them, take the write lock, and
838 * As in any other situation when taking a write lock, @lock must be held for
839 * intent one (or more) times, so @lock will never be left unlocked.
841 void six_lock_readers_add(struct six_lock *lock, int nr)
844 this_cpu_add(*lock->readers, nr);
846 EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0);
847 /* reader count starts at bit 0 */
848 atomic_add(nr, &lock->state);
851 EXPORT_SYMBOL_GPL(six_lock_readers_add);
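/*
 * Sketch of the pattern described above (hypothetical names; "nr_read" is
 * the caller's own count of read locks held on this lock, and the intent
 * lock is already held, as required for taking a write lock):
 *
 *	if (!six_trylock_write(&foo->lock)) {
 *		six_lock_readers_add(&foo->lock, -nr_read);
 *		six_lock_write(&foo->lock);
 *		six_lock_readers_add(&foo->lock, nr_read);
 *	}
 */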
854 * six_lock_exit - release resources held by a lock prior to freeing
855 * @lock: lock to exit
857 * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
858 * required to free the percpu read counts.
860 void six_lock_exit(struct six_lock *lock)
862 WARN_ON(lock->readers && pcpu_read_count(lock));
863 WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read);
865 free_percpu(lock->readers);
866 lock->readers = NULL;
868 EXPORT_SYMBOL_GPL(six_lock_exit);
870 void __six_lock_init(struct six_lock *lock, const char *name,
871 struct lock_class_key *key, enum six_lock_init_flags flags)
873 atomic_set(&lock->state, 0);
874 raw_spin_lock_init(&lock->wait_lock);
875 INIT_LIST_HEAD(&lock->wait_list);
876 #ifdef CONFIG_DEBUG_LOCK_ALLOC
877 debug_check_no_locks_freed((void *) lock, sizeof(*lock));
878 lockdep_init_map(&lock->dep_map, name, key, 0);
882 * Don't assume that we have real percpu variables available in
886 if (flags & SIX_LOCK_INIT_PCPU) {
888 * We don't return an error here on memory allocation failure
889 * since percpu is an optimization, and locks will work with the
890 * same semantics in non-percpu mode: callers can check for
891 * failure if they wish by checking lock->readers, but generally
892 * will not want to treat it as an error.
894 lock->readers = alloc_percpu(unsigned);
898 EXPORT_SYMBOL_GPL(__six_lock_init);
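/*
 * Callers normally go through the six_lock_init() wrapper in six.h rather
 * than calling __six_lock_init() directly; a sketch, assuming the wrapper's
 * usual (lock, flags) form:
 *
 *	six_lock_init(&foo->lock, SIX_LOCK_INIT_PCPU);
 */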