1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/export.h>
4 #include <linux/log2.h>
5 #include <linux/percpu.h>
6 #include <linux/preempt.h>
7 #include <linux/rcupdate.h>
8 #include <linux/sched.h>
9 #include <linux/sched/clock.h>
10 #include <linux/sched/rt.h>
11 #include <linux/sched/task.h>
12 #include <linux/slab.h>
14 #include <trace/events/lock.h>
19 #define EBUG_ON(cond) BUG_ON(cond)
21 #define EBUG_ON(cond) do {} while (0)
24 #define six_acquire(l, t, r, ip) lock_acquire(l, 0, t, r, 1, NULL, ip)
25 #define six_release(l, ip) lock_release(l, ip)
27 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
29 #define SIX_LOCK_HELD_read_OFFSET 0
30 #define SIX_LOCK_HELD_read ~(~0U << 26)
31 #define SIX_LOCK_HELD_intent (1U << 26)
32 #define SIX_LOCK_HELD_write (1U << 27)
33 #define SIX_LOCK_WAITING_read (1U << (28 + SIX_LOCK_read))
34 #define SIX_LOCK_WAITING_write (1U << (28 + SIX_LOCK_write))
35 #define SIX_LOCK_NOSPIN (1U << 31)
37 struct six_lock_vals {
38 /* Value we add to the lock in order to take the lock: */
41 /* If the lock has this value (used as a mask), taking the lock fails: */
44 /* Mask that indicates lock is held for this type: */
47 /* Waitlist we wakeup when releasing the lock: */
48 enum six_lock_type unlock_wakeup;
51 static const struct six_lock_vals l[] = {
53 .lock_val = 1U << SIX_LOCK_HELD_read_OFFSET,
54 .lock_fail = SIX_LOCK_HELD_write,
55 .held_mask = SIX_LOCK_HELD_read,
56 .unlock_wakeup = SIX_LOCK_write,
59 .lock_val = SIX_LOCK_HELD_intent,
60 .lock_fail = SIX_LOCK_HELD_intent,
61 .held_mask = SIX_LOCK_HELD_intent,
62 .unlock_wakeup = SIX_LOCK_intent,
65 .lock_val = SIX_LOCK_HELD_write,
66 .lock_fail = SIX_LOCK_HELD_read,
67 .held_mask = SIX_LOCK_HELD_write,
68 .unlock_wakeup = SIX_LOCK_read,
72 static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
74 if ((atomic_read(&lock->state) & mask) != mask)
75 atomic_or(mask, &lock->state);
78 static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
80 if (atomic_read(&lock->state) & mask)
81 atomic_and(~mask, &lock->state);
84 static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
85 u32 old, struct task_struct *owner)
87 if (type != SIX_LOCK_intent)
90 if (!(old & SIX_LOCK_HELD_intent)) {
94 EBUG_ON(lock->owner != current);
98 static inline unsigned pcpu_read_count(struct six_lock *lock)
100 unsigned read_count = 0;
103 for_each_possible_cpu(cpu)
104 read_count += *per_cpu_ptr(lock->readers, cpu);
109 * __do_six_trylock() - main trylock routine
111 * Returns 1 on success, 0 on failure
113 * In percpu reader mode, a failed trylock may cause a spurious trylock failure
114 * for another thread taking the competing lock type, and we may have to do a
115 * wakeup: when a wakeup is required, we return -1 - wakeup_type.
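 *
 * For example, a failed read trylock with writers waiting returns
 * -1 - SIX_LOCK_write; callers recover the waitlist to wake with -ret - 1,
 * as do_six_trylock() and __six_lock_wakeup() do below.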
117 static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
118 struct task_struct *task, bool try)
123 EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
124 EBUG_ON(type == SIX_LOCK_write &&
125 (try != !(atomic_read(&lock->state) & SIX_LOCK_HELD_write)));
128 * Percpu reader mode:
130 * The basic idea behind this algorithm is that you can implement a lock
131 * between two threads without any atomics, just memory barriers:
133 * For two threads you'll need two variables, one variable for "thread a
134 * has the lock" and another for "thread b has the lock".
136 * To take the lock, a thread sets its variable indicating that it holds
137 * the lock, then issues a full memory barrier, then reads from the
138 * other thread's variable to check if the other thread thinks it has
139 * the lock. If we raced, we back off and retry/sleep.
141 * Failure to take the lock may cause a spurious trylock failure in
142 * another thread, because we temporarily set the lock to indicate that
143 * we held it. This would be a problem for a thread in six_lock(), when
144 * it is calling trylock after adding itself to the waitlist and
147 * Therefore, if we fail to get the lock, and there were waiters of the
148 * type we conflict with, we will have to issue a wakeup.
150 * Since we may be called under wait_lock (and by the wakeup code
151 * itself), we return that the wakeup has to be done instead of doing it
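 *
 * A minimal sketch of the two-thread scheme described above (illustrative
 * pseudocode, not part of this implementation):
 *
 *	bool held[2];			// held[i]: thread i thinks it holds the lock
 *
 *	bool trylock(int i)
 *	{
 *		WRITE_ONCE(held[i], true);	// signal that we own the lock
 *		smp_mb();			// full barrier: our store before their load
 *		if (READ_ONCE(held[!i])) {	// did the other thread also take it?
 *			WRITE_ONCE(held[i], false);	// back off; may cause a spurious
 *			return false;			// trylock failure for the other thread
 *		}
 *		return true;
 *	}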
154 if (type == SIX_LOCK_read && lock->readers) {
156 this_cpu_inc(*lock->readers); /* signal that we own lock */
160 old = atomic_read(&lock->state);
161 ret = !(old & l[type].lock_fail);
163 this_cpu_sub(*lock->readers, !ret);
166 if (!ret && (old & SIX_LOCK_WAITING_write))
167 ret = -1 - SIX_LOCK_write;
168 } else if (type == SIX_LOCK_write && lock->readers) {
170 atomic_add(SIX_LOCK_HELD_write, &lock->state);
171 smp_mb__after_atomic();
174 ret = !pcpu_read_count(lock);
177 old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state);
178 if (old & SIX_LOCK_WAITING_read)
179 ret = -1 - SIX_LOCK_read;
182 old = atomic_read(&lock->state);
184 ret = !(old & l[type].lock_fail);
185 if (!ret || (type == SIX_LOCK_write && !try)) {
189 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, old + l[type].lock_val));
191 EBUG_ON(ret && !(atomic_read(&lock->state) & l[type].held_mask));
195 six_set_owner(lock, type, old, task);
197 EBUG_ON(type == SIX_LOCK_write && try && ret <= 0 &&
198 (atomic_read(&lock->state) & SIX_LOCK_HELD_write));
203 static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
205 struct six_lock_waiter *w, *next;
206 struct task_struct *task;
212 raw_spin_lock(&lock->wait_lock);
214 list_for_each_entry_safe(w, next, &lock->wait_list, list) {
215 if (w->lock_want != lock_type)
218 if (saw_one && lock_type != SIX_LOCK_read)
222 ret = __do_six_trylock(lock, lock_type, w->task, false);
227 * Similar to percpu_rwsem_wake_function(), we need to guard
228 * against the wakee noticing w->lock_acquired, returning, and
229 * then exiting before we do the wakeup:
231 task = get_task_struct(w->task);
232 __list_del(w->list.prev, w->list.next);
234 * The release barrier here ensures the ordering of the
235 * __list_del before setting w->lock_acquired; @w is on the
236 * stack of the thread doing the waiting and will be reused
237 * after it sees w->lock_acquired with no other locking:
238 * pairs with smp_load_acquire() in six_lock_slowpath()
240 smp_store_release(&w->lock_acquired, true);
241 wake_up_process(task);
242 put_task_struct(task);
245 six_clear_bitmask(lock, SIX_LOCK_WAITING_read << lock_type);
247 raw_spin_unlock(&lock->wait_lock);
250 lock_type = -ret - 1;
256 static void six_lock_wakeup(struct six_lock *lock, u32 state,
257 enum six_lock_type lock_type)
259 if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read))
262 if (!(state & (SIX_LOCK_WAITING_read << lock_type)))
265 __six_lock_wakeup(lock, lock_type);
269 static bool do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try)
273 ret = __do_six_trylock(lock, type, current, try);
275 __six_lock_wakeup(lock, -ret - 1);
281 * six_trylock_ip - attempt to take a six lock without blocking
282 * @lock: lock to take
283 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
284 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
286 * Return: true on success, false on failure.
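 *
 * Example (illustrative; @foo is a hypothetical struct embedding a six_lock):
 *
 *	if (six_trylock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_)) {
 *		// ... foo is now locked for intent ...
 *		six_unlock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_);
 *	}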
288 bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
290 if (!do_six_trylock(lock, type, true))
293 if (type != SIX_LOCK_write)
294 six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
297 EXPORT_SYMBOL_GPL(six_trylock_ip);
300 * six_relock_ip - attempt to re-take a lock that was held previously
301 * @lock: lock to take
302 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
303 * @seq: lock sequence number obtained from six_lock_seq() while lock was
305 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
307 * Return: true on success, false on failure.
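 *
 * Example (illustrative; @foo is a hypothetical struct embedding a six_lock):
 *
 *	unsigned seq = six_lock_seq(&foo->lock);	// read while foo->lock is held
 *
 *	six_unlock_ip(&foo->lock, SIX_LOCK_read, _THIS_IP_);
 *	// ...
 *	if (!six_relock_ip(&foo->lock, SIX_LOCK_read, seq, _THIS_IP_)) {
 *		// foo->lock was taken for write in between: retake it and revalidate
 *	}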
309 bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
310 unsigned seq, unsigned long ip)
312 if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))
315 if (six_lock_seq(lock) != seq) {
316 six_unlock_ip(lock, type, ip);
322 EXPORT_SYMBOL_GPL(six_relock_ip);
324 #ifdef CONFIG_LOCK_SPIN_ON_OWNER
326 static inline bool six_owner_running(struct six_lock *lock)
329 * When there's no owner, we might have preempted the owner between
330 * acquiring the lock and setting the owner field. If we're an RT task,
331 * that will live-lock because we won't let the owner complete.
334 struct task_struct *owner = READ_ONCE(lock->owner);
335 bool ret = owner ? owner_on_cpu(owner) : !rt_task(current);
341 static inline bool six_optimistic_spin(struct six_lock *lock,
342 struct six_lock_waiter *wait,
343 enum six_lock_type type)
348 if (type == SIX_LOCK_write)
351 if (lock->wait_list.next != &wait->list)
354 if (atomic_read(&lock->state) & SIX_LOCK_NOSPIN)
358 end_time = sched_clock() + 10 * NSEC_PER_USEC;
360 while (!need_resched() && six_owner_running(lock)) {
362 * Ensures that writes to the waitlist entry happen after we see
363 * wait->lock_acquired: pairs with the smp_store_release in
366 if (smp_load_acquire(&wait->lock_acquired)) {
371 if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
372 six_set_bitmask(lock, SIX_LOCK_NOSPIN);
377 * The cpu_relax() call is a compiler barrier which forces
378 * everything in this loop to be re-loaded. We don't need
379 * memory barriers as we'll eventually observe the right
380 * values at the cost of a few extra spins.
389 #else /* CONFIG_LOCK_SPIN_ON_OWNER */
391 static inline bool six_optimistic_spin(struct six_lock *lock,
392 struct six_lock_waiter *wait,
393 enum six_lock_type type)
401 static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
402 struct six_lock_waiter *wait,
403 six_lock_should_sleep_fn should_sleep_fn, void *p,
408 if (type == SIX_LOCK_write) {
409 EBUG_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
410 atomic_add(SIX_LOCK_HELD_write, &lock->state);
411 smp_mb__after_atomic();
414 trace_contention_begin(lock, 0);
415 lock_contended(&lock->dep_map, ip);
417 wait->task = current;
418 wait->lock_want = type;
419 wait->lock_acquired = false;
421 raw_spin_lock(&lock->wait_lock);
422 six_set_bitmask(lock, SIX_LOCK_WAITING_read << type);
424 * Retry taking the lock after taking waitlist lock, in case we raced
427 ret = __do_six_trylock(lock, type, current, false);
429 wait->start_time = local_clock();
431 if (!list_empty(&lock->wait_list)) {
432 struct six_lock_waiter *last =
433 list_last_entry(&lock->wait_list,
434 struct six_lock_waiter, list);
436 if (time_before_eq64(wait->start_time, last->start_time))
437 wait->start_time = last->start_time + 1;
440 list_add_tail(&wait->list, &lock->wait_list);
442 raw_spin_unlock(&lock->wait_lock);
444 if (unlikely(ret > 0)) {
449 if (unlikely(ret < 0)) {
450 __six_lock_wakeup(lock, -ret - 1);
454 if (six_optimistic_spin(lock, wait, type))
458 set_current_state(TASK_UNINTERRUPTIBLE);
461 * Ensures that writes to the waitlist entry happen after we see
462 * wait->lock_acquired: pairs with the smp_store_release in
465 if (smp_load_acquire(&wait->lock_acquired))
468 ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
473 * If should_sleep_fn() returns an error, we are
474 * required to return that error even if we already
475 * acquired the lock - should_sleep_fn() might have
476 * modified external state (e.g. when the deadlock cycle
477 * detector in bcachefs issued a transaction restart)
479 raw_spin_lock(&lock->wait_lock);
480 acquired = wait->lock_acquired;
482 list_del(&wait->list);
483 raw_spin_unlock(&lock->wait_lock);
485 if (unlikely(acquired))
486 do_six_unlock_type(lock, type);
493 __set_current_state(TASK_RUNNING);
495 if (ret && type == SIX_LOCK_write) {
496 six_clear_bitmask(lock, SIX_LOCK_HELD_write);
497 six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
499 trace_contention_end(lock, 0);
505 * six_lock_ip_waiter - take a lock, with full waitlist interface
506 * @lock: lock to take
507 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
508 * @wait: pointer to wait object, which will be added to lock's waitlist
509 * @should_sleep_fn: callback run after adding to waitlist, immediately prior
511 * @p: passed through to @should_sleep_fn
512 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
514 * This is the most general six_lock() variant, with parameters to support full
515 * cycle detection for deadlock avoidance.
517 * The code calling this function must implement tracking of held locks, and the
518 * @wait object should be embedded into the struct that tracks held locks -
519 * which must also be accessible in a thread-safe way.
521 * @should_sleep_fn should invoke the cycle detector; it should walk each
522 * lock's waiters, and for each waiter recursively walk their held locks.
524 * When this function must block, @wait will be added to @lock's waitlist before
525 * calling trylock, and before calling @should_sleep_fn, and @wait will not be
526 * removed from the lock waitlist until the lock has been successfully acquired,
529 * @wait.start_time will be monotonically increasing for any given waitlist, and
530 * thus may be used as a loop cursor.
532 * Return: 0 on success, or the return code from @should_sleep_fn on failure.
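 *
 * Example (illustrative; the callback, @foo, and the deadlock walk are hypothetical):
 *
 *	static int check_for_cycle(struct six_lock *lock, void *p)
 *	{
 *		// walk lock->wait_list; for each waiter, recursively walk the
 *		// locks it holds; return a nonzero error to abort instead of sleeping
 *		return 0;
 *	}
 *
 *	struct six_lock_waiter wait;
 *	int ret = six_lock_ip_waiter(&foo->lock, SIX_LOCK_intent, &wait,
 *				     check_for_cycle, NULL, _THIS_IP_);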
534 int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
535 struct six_lock_waiter *wait,
536 six_lock_should_sleep_fn should_sleep_fn, void *p,
541 wait->start_time = 0;
543 if (type != SIX_LOCK_write)
544 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip);
546 ret = do_six_trylock(lock, type, true) ? 0
547 : six_lock_slowpath(lock, type, wait, should_sleep_fn, p, ip);
549 if (ret && type != SIX_LOCK_write)
550 six_release(&lock->dep_map, ip);
552 lock_acquired(&lock->dep_map, ip);
556 EXPORT_SYMBOL_GPL(six_lock_ip_waiter);
559 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
563 if (type == SIX_LOCK_intent)
566 if (type == SIX_LOCK_read &&
568 smp_mb(); /* unlock barrier */
569 this_cpu_dec(*lock->readers);
570 smp_mb(); /* between unlocking and checking for waiters */
571 state = atomic_read(&lock->state);
573 u32 v = l[type].lock_val;
575 if (type != SIX_LOCK_read)
576 v += atomic_read(&lock->state) & SIX_LOCK_NOSPIN;
578 EBUG_ON(!(atomic_read(&lock->state) & l[type].held_mask));
579 state = atomic_sub_return_release(v, &lock->state);
582 six_lock_wakeup(lock, state, l[type].unlock_wakeup);
586 * six_unlock_ip - drop a six lock
587 * @lock: lock to unlock
588 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
589 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
591 * When a lock is held multiple times (because six_lock_increment() was used),
592 * this decrements the 'lock held' counter by one.
595 * six_lock_read(&foo->lock); read count 1
596 * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
597 * six_lock_unlock(&foo->lock, SIX_LOCK_read); read count 1
598 * six_lock_unlock(&foo->lock, SIX_LOCK_read); read count 0
600 void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
602 EBUG_ON(type == SIX_LOCK_write &&
603 !(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
604 EBUG_ON((type == SIX_LOCK_write ||
605 type == SIX_LOCK_intent) &&
606 lock->owner != current);
608 if (type != SIX_LOCK_write)
609 six_release(&lock->dep_map, ip);
613 if (type == SIX_LOCK_intent &&
614 lock->intent_lock_recurse) {
615 --lock->intent_lock_recurse;
619 do_six_unlock_type(lock, type);
621 EXPORT_SYMBOL_GPL(six_unlock_ip);
624 * six_lock_downgrade - convert an intent lock to a read lock
625 * @lock: lock to downgrade
627 * @lock will have read count incremented and intent count decremented
629 void six_lock_downgrade(struct six_lock *lock)
631 six_lock_increment(lock, SIX_LOCK_read);
632 six_unlock_intent(lock);
634 EXPORT_SYMBOL_GPL(six_lock_downgrade);
637 * six_lock_tryupgrade - attempt to convert read lock to an intent lock
638 * @lock: lock to upgrade
640 * On success, @lock will have intent count incremented and read count
643 * Return: true on success, false on failure
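 *
 * Example (illustrative; @foo is hypothetical):
 *
 *	// currently holding foo->lock for read:
 *	if (!six_lock_tryupgrade(&foo->lock)) {
 *		six_unlock_ip(&foo->lock, SIX_LOCK_read, _THIS_IP_);
 *		// take the intent lock from scratch and revalidate
 *	}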
645 bool six_lock_tryupgrade(struct six_lock *lock)
647 u32 old = atomic_read(&lock->state), new;
652 if (new & SIX_LOCK_HELD_intent)
655 if (!lock->readers) {
656 EBUG_ON(!(new & SIX_LOCK_HELD_read));
657 new -= l[SIX_LOCK_read].lock_val;
660 new |= SIX_LOCK_HELD_intent;
661 } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, new));
664 this_cpu_dec(*lock->readers);
666 six_set_owner(lock, SIX_LOCK_intent, old, current);
670 EXPORT_SYMBOL_GPL(six_lock_tryupgrade);
673 * six_trylock_convert - attempt to convert a held lock from one type to another
674 * @lock: lock to upgrade
675 * @from: SIX_LOCK_read or SIX_LOCK_intent
676 * @to: SIX_LOCK_read or SIX_LOCK_intent
678 * On success, @lock will have intent count incremented and read count
681 * Return: true on success, false on failure
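 *
 * Example (illustrative; @foo is hypothetical):
 *
 *	// intent -> read (a downgrade) always succeeds:
 *	six_trylock_convert(&foo->lock, SIX_LOCK_intent, SIX_LOCK_read);
 *
 *	// read -> intent may fail if another thread holds the intent lock:
 *	if (!six_trylock_convert(&foo->lock, SIX_LOCK_read, SIX_LOCK_intent)) {
 *		// still held for read; caller must retry some other way
 *	}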
683 bool six_trylock_convert(struct six_lock *lock,
684 enum six_lock_type from,
685 enum six_lock_type to)
687 EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);
692 if (to == SIX_LOCK_read) {
693 six_lock_downgrade(lock);
696 return six_lock_tryupgrade(lock);
699 EXPORT_SYMBOL_GPL(six_trylock_convert);
702 * six_lock_increment - increase held lock count on a lock that is already held
703 * @lock: lock to increment
704 * @type: SIX_LOCK_read or SIX_LOCK_intent
706 * @lock must already be held, with a lock type that is greater than or equal to
709 * A corresponding six_unlock_type() call will be required for @lock to be fully
712 void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
714 six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);
716 /* XXX: assert already locked, and that we don't overflow: */
721 this_cpu_inc(*lock->readers);
723 EBUG_ON(!(atomic_read(&lock->state) &
725 SIX_LOCK_HELD_intent)));
726 atomic_add(l[type].lock_val, &lock->state);
729 case SIX_LOCK_intent:
730 EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
731 lock->intent_lock_recurse++;
738 EXPORT_SYMBOL_GPL(six_lock_increment);
741 * six_lock_wakeup_all - wake up all waiters on @lock
742 * @lock: lock to wake up waiters for
744 * Waking up waiters will cause them to re-run should_sleep_fn, which may then
745 * abort the lock operation.
747 * This function is never needed in a bug-free program; it's only useful in
748 * debug code, e.g. to determine if a cycle detector is at fault.
750 void six_lock_wakeup_all(struct six_lock *lock)
752 u32 state = atomic_read(&lock->state);
753 struct six_lock_waiter *w;
755 six_lock_wakeup(lock, state, SIX_LOCK_read);
756 six_lock_wakeup(lock, state, SIX_LOCK_intent);
757 six_lock_wakeup(lock, state, SIX_LOCK_write);
759 raw_spin_lock(&lock->wait_lock);
760 list_for_each_entry(w, &lock->wait_list, list)
761 wake_up_process(w->task);
762 raw_spin_unlock(&lock->wait_lock);
764 EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
767 * six_lock_counts - return held lock counts, for each lock type
768 * @lock: lock to return counters for
770 * Return: the number of times a lock is held for read, intent and write.
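 *
 * Example (illustrative; @foo is hypothetical):
 *
 *	struct six_lock_count c = six_lock_counts(&foo->lock);
 *
 *	if (c.n[SIX_LOCK_intent] > 1) {
 *		// the intent lock is held recursively (via six_lock_increment())
 *	}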
772 struct six_lock_count six_lock_counts(struct six_lock *lock)
774 struct six_lock_count ret;
776 ret.n[SIX_LOCK_read] = !lock->readers
777 ? atomic_read(&lock->state) & SIX_LOCK_HELD_read
778 : pcpu_read_count(lock);
779 ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) +
780 lock->intent_lock_recurse;
781 ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
785 EXPORT_SYMBOL_GPL(six_lock_counts);
788 * six_lock_readers_add - directly manipulate reader count of a lock
789 * @lock: lock to add/subtract readers for
790 * @nr: reader count to add/subtract
792 * When an upper layer is implementing lock reentrancy, we may have both read
793 * and intent locks on the same lock.
795 * When we need to take a write lock, the read locks will cause self-deadlock,
796 * because the six lock code does not track which read locks are held by the
797 * current thread and which are held by a different thread - it does no
798 * per-thread tracking of held locks.
800 * The upper layer that is tracking held locks may, however, if trylock() has
801 * failed, count up its own read locks, subtract them, take the write lock, and
804 * As in any other situation when taking a write lock, @lock must be held for
805 * intent one (or more) times, so @lock will never be left unlocked.
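 *
 * Example of that pattern (illustrative; @nr is the number of read locks the
 * upper layer knows the current thread holds on foo->lock):
 *
 *	six_lock_readers_add(&foo->lock, -nr);	// hide our own read locks
 *	// ... take the write lock ...
 *	six_lock_readers_add(&foo->lock, nr);	// restore the read count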
807 void six_lock_readers_add(struct six_lock *lock, int nr)
810 this_cpu_add(*lock->readers, nr);
812 EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0);
813 /* reader count starts at bit 0 */
814 atomic_add(nr, &lock->state);
817 EXPORT_SYMBOL_GPL(six_lock_readers_add);
820 * six_lock_exit - release resources held by a lock prior to freeing
821 * @lock: lock to exit
823 * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
824 * required to free the percpu read counts.
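 *
 * Example (illustrative; @foo and foo_lock_key are hypothetical):
 *
 *	static struct lock_class_key foo_lock_key;
 *
 *	__six_lock_init(&foo->lock, "foo->lock", &foo_lock_key, SIX_LOCK_INIT_PCPU);
 *	// ...
 *	six_lock_exit(&foo->lock);	// must be called before freeing foo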
826 void six_lock_exit(struct six_lock *lock)
828 WARN_ON(lock->readers && pcpu_read_count(lock));
829 WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read);
831 free_percpu(lock->readers);
832 lock->readers = NULL;
834 EXPORT_SYMBOL_GPL(six_lock_exit);
836 void __six_lock_init(struct six_lock *lock, const char *name,
837 struct lock_class_key *key, enum six_lock_init_flags flags)
839 atomic_set(&lock->state, 0);
840 raw_spin_lock_init(&lock->wait_lock);
841 INIT_LIST_HEAD(&lock->wait_list);
842 #ifdef CONFIG_DEBUG_LOCK_ALLOC
843 debug_check_no_locks_freed((void *) lock, sizeof(*lock));
844 lockdep_init_map(&lock->dep_map, name, key, 0);
848 * Don't assume that we have real percpu variables available in
852 if (flags & SIX_LOCK_INIT_PCPU) {
854 * We don't return an error here on memory allocation failure
855 * since percpu is an optimization, and locks will work with the
856 * same semantics in non-percpu mode: callers can check for
857 * failure if they wish by checking lock->readers, but generally
858 * will not want to treat it as an error.
860 lock->readers = alloc_percpu(unsigned);
864 EXPORT_SYMBOL_GPL(__six_lock_init);