// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#include <trace/events/lock.h>
#ifdef DEBUG
#define EBUG_ON(cond) BUG_ON(cond)
#else
#define EBUG_ON(cond) do {} while (0)
#endif
#define six_acquire(l, t, r, ip) lock_acquire(l, 0, t, r, 1, NULL, ip)
#define six_release(l, ip) lock_release(l, ip)

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

#define SIX_LOCK_HELD_read_OFFSET 0
#define SIX_LOCK_HELD_read ~(~0U << 26)
#define SIX_LOCK_HELD_intent (1U << 26)
#define SIX_LOCK_HELD_write (1U << 27)
#define SIX_LOCK_WAITING_read (1U << (28 + SIX_LOCK_read))
#define SIX_LOCK_WAITING_intent (1U << (28 + SIX_LOCK_intent))
#define SIX_LOCK_WAITING_write (1U << (28 + SIX_LOCK_write))
#define SIX_LOCK_NOSPIN (1U << 31)
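
/*
 * Layout of lock->state implied by the masks above (a single 32 bit atomic):
 *
 *	bits  0-25	reader count
 *	bit  26		held for intent
 *	bit  27		held for write
 *	bits 28-30	"waiters present" flag, one bit per lock type
 *	bit  31		NOSPIN: hint that optimistic spinning should give up
 */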
struct six_lock_vals {
	/* Value we add to the lock in order to take the lock: */
	u32			lock_val;

	/* If the lock has this value (used as a mask), taking the lock fails: */
	u32			lock_fail;

	/* Mask that indicates lock is held for this type: */
	u32			held_mask;

	/* Waitlist we wake up when releasing the lock: */
	enum six_lock_type	unlock_wakeup;
};
static const struct six_lock_vals l[] = {
	[SIX_LOCK_read] = {
		.lock_val = 1U << SIX_LOCK_HELD_read_OFFSET,
		.lock_fail = SIX_LOCK_HELD_write,
		.held_mask = SIX_LOCK_HELD_read,
		.unlock_wakeup = SIX_LOCK_write,
	},
	[SIX_LOCK_intent] = {
		.lock_val = SIX_LOCK_HELD_intent,
		.lock_fail = SIX_LOCK_HELD_intent,
		.held_mask = SIX_LOCK_HELD_intent,
		.unlock_wakeup = SIX_LOCK_intent,
	},
	[SIX_LOCK_write] = {
		.lock_val = SIX_LOCK_HELD_write,
		.lock_fail = SIX_LOCK_HELD_read,
		.held_mask = SIX_LOCK_HELD_write,
		.unlock_wakeup = SIX_LOCK_read,
	},
};
static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
	if ((atomic_read(&lock->state) & mask) != mask)
		atomic_or(mask, &lock->state);

static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
	if (atomic_read(&lock->state) & mask)
		atomic_and(~mask, &lock->state);

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
				 u32 old, struct task_struct *owner)
	if (type != SIX_LOCK_intent)

	if (!(old & SIX_LOCK_HELD_intent)) {

	EBUG_ON(lock->owner != current);

static inline unsigned pcpu_read_count(struct six_lock *lock)
	unsigned read_count = 0;

	for_each_possible_cpu(cpu)
		read_count += *per_cpu_ptr(lock->readers, cpu);
/*
 * __do_six_trylock() - main trylock routine
 *
 * Returns 1 on success, 0 on failure
 *
 * In percpu reader mode, a failed trylock may cause a spurious trylock failure
 * for another thread taking the competing lock type, and we may have to do a
 * wakeup: when a wakeup is required, we return -1 - wakeup_type.
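 *
 * For example, a return of -1 - SIX_LOCK_read means the lock was not taken and
 * the caller is responsible for waking up SIX_LOCK_read waiters.
 */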
static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
			    struct task_struct *task, bool try)
	EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
	EBUG_ON(type == SIX_LOCK_write &&
		(try != !(atomic_read(&lock->state) & SIX_LOCK_HELD_write)));
	/*
	 * Percpu reader mode:
	 *
	 * The basic idea behind this algorithm is that you can implement a lock
	 * between two threads without any atomics, just memory barriers:
	 *
	 * For two threads you'll need two variables, one variable for "thread a
	 * has the lock" and another for "thread b has the lock".
	 *
	 * To take the lock, a thread sets its variable indicating that it holds
	 * the lock, then issues a full memory barrier, then reads from the
	 * other thread's variable to check if the other thread thinks it has
	 * the lock. If we raced, we back off and retry/sleep.
	 *
	 * Failure to take the lock may cause a spurious trylock failure in
	 * another thread, because we temporarily set the lock to indicate that
	 * we held it. This would be a problem for a thread in six_lock(), when
	 * they are calling trylock after adding themselves to the waitlist and
	 * prior to sleeping.
	 *
	 * Therefore, if we fail to get the lock, and there were waiters of the
	 * type we conflict with, we will have to issue a wakeup.
	 *
	 * Since we may be called under wait_lock (and by the wakeup code
	 * itself), we return that the wakeup has to be done instead of doing it
	 * ourselves.
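	 *
	 * A minimal sketch of that two-variable scheme (illustrative only;
	 * a_held and b_held are made-up flags, not actual six lock fields).
	 * Thread a takes the lock with:
	 *
	 *	WRITE_ONCE(a_held, true);
	 *	smp_mb();
	 *	locked = !READ_ONCE(b_held);
	 *	if (!locked)
	 *		WRITE_ONCE(a_held, false);
	 *
	 * and backs off, retries or sleeps if it lost the race.
	 */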
	if (type == SIX_LOCK_read && lock->readers) {
		this_cpu_inc(*lock->readers); /* signal that we own lock */

		old = atomic_read(&lock->state);
		ret = !(old & l[type].lock_fail);

		this_cpu_sub(*lock->readers, !ret);

		if (!ret && (old & SIX_LOCK_WAITING_write))
			ret = -1 - SIX_LOCK_write;
	} else if (type == SIX_LOCK_write && lock->readers) {
		atomic_add(SIX_LOCK_HELD_write, &lock->state);
		smp_mb__after_atomic();

		ret = !pcpu_read_count(lock);

		old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state);
		if (old & SIX_LOCK_WAITING_read)
			ret = -1 - SIX_LOCK_read;

		old = atomic_read(&lock->state);
		ret = !(old & l[type].lock_fail);
		if (!ret || (type == SIX_LOCK_write && !try)) {
	} while (!atomic_try_cmpxchg_acquire(&lock->state, &old, old + l[type].lock_val));

	EBUG_ON(ret && !(atomic_read(&lock->state) & l[type].held_mask));

	six_set_owner(lock, type, old, task);

	EBUG_ON(type == SIX_LOCK_write && try && ret <= 0 &&
		(atomic_read(&lock->state) & SIX_LOCK_HELD_write));
static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
	struct six_lock_waiter *w, *next;
	struct task_struct *task;

	raw_spin_lock(&lock->wait_lock);

	list_for_each_entry_safe(w, next, &lock->wait_list, list) {
		if (w->lock_want != lock_type)

		if (saw_one && lock_type != SIX_LOCK_read)

		ret = __do_six_trylock(lock, lock_type, w->task, false);

		__list_del(w->list.prev, w->list.next);

		/*
		 * Do no writes to @w besides setting lock_acquired - otherwise
		 * we would need a memory barrier:
		 */
		w->lock_acquired = true;
		wake_up_process(task);

	six_clear_bitmask(lock, SIX_LOCK_WAITING_read << lock_type);

	raw_spin_unlock(&lock->wait_lock);

	lock_type = -ret - 1;
static void six_lock_wakeup(struct six_lock *lock, u32 state,
			    enum six_lock_type lock_type)
	if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read))

	if (!(state & (SIX_LOCK_WAITING_read << lock_type)))

	__six_lock_wakeup(lock, lock_type);

static bool do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try)
	ret = __do_six_trylock(lock, type, current, try);

	__six_lock_wakeup(lock, -ret - 1);
/**
 * six_trylock_ip - attempt to take a six lock without blocking
 * @lock: lock to take
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * Return: true on success, false on failure.
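 *
 * Example (sketch; a struct foo embedding a six_lock is assumed):
 *
 *	if (!six_trylock_ip(&foo->lock, SIX_LOCK_intent, _THIS_IP_))
 *		return -EAGAIN;
 */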
bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
	if (!do_six_trylock(lock, type, true))

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
EXPORT_SYMBOL_GPL(six_trylock_ip);
/**
 * six_relock_ip - attempt to re-take a lock that was held previously
 * @lock: lock to take
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @seq: lock sequence number obtained from six_lock_seq() while lock was
 *	 still held
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * Return: true on success, false on failure.
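 *
 * Example of the intended pattern (sketch; struct foo embedding a six_lock is
 * assumed):
 *
 *	unsigned seq = six_lock_seq(&foo->lock);
 *
 *	six_unlock_ip(&foo->lock, SIX_LOCK_read, _THIS_IP_);
 *	...
 *	if (!six_relock_ip(&foo->lock, SIX_LOCK_read, seq, _THIS_IP_))
 *		goto retry;
 *
 * If the relock fails, the sequence number changed (the lock was taken for
 * write in the interim) or the trylock itself failed; the caller must retake
 * the lock and revalidate whatever it protects.
 */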
bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
		   unsigned seq, unsigned long ip)
	if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))

	if (six_lock_seq(lock) != seq) {
		six_unlock_ip(lock, type, ip);
EXPORT_SYMBOL_GPL(six_relock_ip);

#ifdef CONFIG_LOCK_SPIN_ON_OWNER

static inline bool six_can_spin_on_owner(struct six_lock *lock)
	struct task_struct *owner;

	owner = READ_ONCE(lock->owner);
	ret = !owner || owner_on_cpu(owner);

static inline bool six_spin_on_owner(struct six_lock *lock,
				     struct task_struct *owner,
	while (lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {

		if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
			six_set_bitmask(lock, SIX_LOCK_NOSPIN);

static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
	struct task_struct *task = current;

	if (type == SIX_LOCK_write)

	if (!six_can_spin_on_owner(lock))

	if (!osq_lock(&lock->osq))

	end_time = sched_clock() + 10 * NSEC_PER_USEC;

		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = READ_ONCE(lock->owner);
		if (owner && !six_spin_on_owner(lock, owner, end_time))

		if (do_six_trylock(lock, type, false)) {
			osq_unlock(&lock->osq);
		/*
		 * When there's no owner, we might have preempted the owner
		 * between it acquiring the lock and setting the owner field.
		 * If we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */

	osq_unlock(&lock->osq);

	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock again. This avoids getting
	 * scheduled out right after we obtained the lock.
	 */

#else /* CONFIG_LOCK_SPIN_ON_OWNER */
static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)

static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
			     struct six_lock_waiter *wait,
			     six_lock_should_sleep_fn should_sleep_fn, void *p,
	if (type == SIX_LOCK_write) {
		EBUG_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
		atomic_add(SIX_LOCK_HELD_write, &lock->state);
		smp_mb__after_atomic();

	trace_contention_begin(lock, 0);
	lock_contended(&lock->dep_map, ip);

	if (six_optimistic_spin(lock, type))

	wait->task = current;
	wait->lock_want = type;
	wait->lock_acquired = false;

	raw_spin_lock(&lock->wait_lock);
	six_set_bitmask(lock, SIX_LOCK_WAITING_read << type);
	/*
	 * Retry taking the lock after taking waitlist lock, in case we raced
	 * with an unlock:
	 */
	ret = __do_six_trylock(lock, type, current, false);

	wait->start_time = local_clock();

	if (!list_empty(&lock->wait_list)) {
		struct six_lock_waiter *last =
			list_last_entry(&lock->wait_list,
					struct six_lock_waiter, list);

		if (time_before_eq64(wait->start_time, last->start_time))
			wait->start_time = last->start_time + 1;

	list_add_tail(&wait->list, &lock->wait_list);

	raw_spin_unlock(&lock->wait_lock);

	if (unlikely(ret > 0)) {

	if (unlikely(ret < 0)) {
		__six_lock_wakeup(lock, -ret - 1);

		set_current_state(TASK_UNINTERRUPTIBLE);

		if (wait->lock_acquired)

		ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
			raw_spin_lock(&lock->wait_lock);
			if (!wait->lock_acquired)
				list_del(&wait->list);
			raw_spin_unlock(&lock->wait_lock);

			if (unlikely(wait->lock_acquired))
				do_six_unlock_type(lock, type);

	__set_current_state(TASK_RUNNING);

	if (ret && type == SIX_LOCK_write) {
		six_clear_bitmask(lock, SIX_LOCK_HELD_write);
		six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);

	trace_contention_end(lock, 0);
/**
 * six_lock_ip_waiter - take a lock, with full waitlist interface
 * @lock: lock to take
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @wait: pointer to wait object, which will be added to lock's waitlist
 * @should_sleep_fn: callback run after adding to waitlist, immediately prior
 *		     to sleeping
 * @p: passed through to @should_sleep_fn
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * This is the most general six_lock() variant, with parameters to support full
 * cycle detection for deadlock avoidance.
 *
 * The code calling this function must implement tracking of held locks, and the
 * @wait object should be embedded into the struct that tracks held locks -
 * which must also be accessible in a thread-safe way.
 *
 * @should_sleep_fn should invoke the cycle detector; it should walk each
 * lock's waiters, and for each waiter recursively walk their held locks.
 *
 * When this function must block, @wait will be added to @lock's waitlist before
 * calling trylock, and before calling @should_sleep_fn, and @wait will not be
 * removed from the lock waitlist until the lock has been successfully acquired,
 * or the attempt aborted.
 *
 * @wait.start_time will be monotonically increasing for any given waitlist, and
 * thus may be used as a loop cursor.
 *
 * Return: 0 on success, or the return code from @should_sleep_fn on failure.
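 *
 * Minimal usage sketch (my_should_sleep(), my_cycle_check() and struct foo are
 * hypothetical caller-side code, not part of the six lock API):
 *
 *	static int my_should_sleep(struct six_lock *lock, void *p)
 *	{
 *		return my_cycle_check(p) ? -EDEADLK : 0;
 *	}
 *
 *	struct six_lock_waiter wait;
 *	int ret = six_lock_ip_waiter(&foo->lock, SIX_LOCK_intent, &wait,
 *				     my_should_sleep, p, _THIS_IP_);
 */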
int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
		       struct six_lock_waiter *wait,
		       six_lock_should_sleep_fn should_sleep_fn, void *p,
	wait->start_time = 0;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip);

	ret = do_six_trylock(lock, type, true) ? 0
		: six_lock_slowpath(lock, type, wait, should_sleep_fn, p, ip);

	if (ret && type != SIX_LOCK_write)
		six_release(&lock->dep_map, ip);

	lock_acquired(&lock->dep_map, ip);
EXPORT_SYMBOL_GPL(six_lock_ip_waiter);
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
	if (type == SIX_LOCK_intent)

	if (type == SIX_LOCK_read &&
		smp_mb(); /* unlock barrier */
		this_cpu_dec(*lock->readers);
		smp_mb(); /* between unlocking and checking for waiters */
		state = atomic_read(&lock->state);

		u32 v = l[type].lock_val;

		if (type != SIX_LOCK_read)
			v += atomic_read(&lock->state) & SIX_LOCK_NOSPIN;

		EBUG_ON(!(atomic_read(&lock->state) & l[type].held_mask));
		state = atomic_sub_return_release(v, &lock->state);

	six_lock_wakeup(lock, state, l[type].unlock_wakeup);
/**
 * six_unlock_ip - drop a six lock
 * @lock: lock to unlock
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * When a lock is held multiple times (because six_lock_increment() was used),
 * this decrements the 'lock held' counter by one.
 *
 * For example:
 *
 *	six_lock_read(&foo->lock);				read count 1
 *	six_lock_increment(&foo->lock, SIX_LOCK_read);		read count 2
 *	six_unlock_type(&foo->lock, SIX_LOCK_read);		read count 1
 *	six_unlock_type(&foo->lock, SIX_LOCK_read);		read count 0
 */
void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
	EBUG_ON(type == SIX_LOCK_write &&
		!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
	EBUG_ON((type == SIX_LOCK_write ||
		 type == SIX_LOCK_intent) &&
		lock->owner != current);

	if (type != SIX_LOCK_write)
		six_release(&lock->dep_map, ip);

	if (type == SIX_LOCK_intent &&
	    lock->intent_lock_recurse) {
		--lock->intent_lock_recurse;

	do_six_unlock_type(lock, type);
EXPORT_SYMBOL_GPL(six_unlock_ip);
/**
 * six_lock_downgrade - convert an intent lock to a read lock
 * @lock: lock to downgrade
 *
 * @lock will have read count incremented and intent count decremented
 */
void six_lock_downgrade(struct six_lock *lock)
	six_lock_increment(lock, SIX_LOCK_read);
	six_unlock_intent(lock);
EXPORT_SYMBOL_GPL(six_lock_downgrade);
/**
 * six_lock_tryupgrade - attempt to convert read lock to an intent lock
 * @lock: lock to upgrade
 *
 * On success, @lock will have intent count incremented and read count
 * decremented.
 *
 * Return: true on success, false on failure
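 *
 * Example (sketch, assuming the per-type wrappers from six.h and a
 * hypothetical struct foo embedding a six_lock):
 *
 *	six_lock_read(&foo->lock);
 *	...
 *	if (!six_lock_tryupgrade(&foo->lock))
 *		six_unlock_read(&foo->lock);
 *
 * On failure the caller typically drops its read lock (as above), retakes the
 * lock for intent, and revalidates whatever state it depends on.
 */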
bool six_lock_tryupgrade(struct six_lock *lock)
	u32 old = atomic_read(&lock->state), new;

		if (new & SIX_LOCK_HELD_intent)

		if (!lock->readers) {
			EBUG_ON(!(new & SIX_LOCK_HELD_read));
			new -= l[SIX_LOCK_read].lock_val;

		new |= SIX_LOCK_HELD_intent;
	} while (!atomic_try_cmpxchg_acquire(&lock->state, &old, new));

	this_cpu_dec(*lock->readers);

	six_set_owner(lock, SIX_LOCK_intent, old, current);
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);
/**
 * six_trylock_convert - attempt to convert a held lock from one type to another
 * @lock: lock to convert
 * @from: SIX_LOCK_read or SIX_LOCK_intent
 * @to: SIX_LOCK_read or SIX_LOCK_intent
 *
 * On success, @lock will be held for @to and no longer held for @from.
 *
 * Return: true on success, false on failure
 */
bool six_trylock_convert(struct six_lock *lock,
			 enum six_lock_type from,
			 enum six_lock_type to)
	EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

	if (to == SIX_LOCK_read) {
		six_lock_downgrade(lock);

	return six_lock_tryupgrade(lock);
EXPORT_SYMBOL_GPL(six_trylock_convert);
/**
 * six_lock_increment - increase held lock count on a lock that is already held
 * @lock: lock to increment
 * @type: SIX_LOCK_read or SIX_LOCK_intent
 *
 * @lock must already be held, with a lock type that is greater than or equal to
 * @type.
 *
 * A corresponding six_unlock_type() call will be required for @lock to be fully
 * unlocked.
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
	six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);

	/* XXX: assert already locked, and that we don't overflow: */

			this_cpu_inc(*lock->readers);

			EBUG_ON(!(atomic_read(&lock->state) &
				  (SIX_LOCK_HELD_read|
				   SIX_LOCK_HELD_intent)));
			atomic_add(l[type].lock_val, &lock->state);

	case SIX_LOCK_intent:
		EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
		lock->intent_lock_recurse++;

EXPORT_SYMBOL_GPL(six_lock_increment);
/**
 * six_lock_wakeup_all - wake up all waiters on @lock
 * @lock: lock to wake up waiters for
 *
 * Waking up waiters will cause them to re-run should_sleep_fn, which may then
 * abort the lock operation.
 *
 * This function is never needed in a bug-free program; it's only useful in
 * debug code, e.g. to determine if a cycle detector is at fault.
 */
void six_lock_wakeup_all(struct six_lock *lock)
	u32 state = atomic_read(&lock->state);
	struct six_lock_waiter *w;

	six_lock_wakeup(lock, state, SIX_LOCK_read);
	six_lock_wakeup(lock, state, SIX_LOCK_intent);
	six_lock_wakeup(lock, state, SIX_LOCK_write);

	raw_spin_lock(&lock->wait_lock);
	list_for_each_entry(w, &lock->wait_list, list)
		wake_up_process(w->task);
	raw_spin_unlock(&lock->wait_lock);
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
/**
 * six_lock_counts - return held lock counts, for each lock type
 * @lock: lock to return counters for
 *
 * Return: the number of times a lock is held for read, intent and write.
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
	struct six_lock_count ret;

	ret.n[SIX_LOCK_read] = !lock->readers
		? atomic_read(&lock->state) & SIX_LOCK_HELD_read
		: pcpu_read_count(lock);
	ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) +
		lock->intent_lock_recurse;
	ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
EXPORT_SYMBOL_GPL(six_lock_counts);
/**
 * six_lock_readers_add - directly manipulate reader count of a lock
 * @lock: lock to add/subtract readers for
 * @nr: reader count to add/subtract
 *
 * When an upper layer is implementing lock reentrancy, we may have both read
 * and intent locks on the same lock.
 *
 * When we need to take a write lock, the read locks will cause self-deadlock,
 * because six locks themselves do not track which read locks are held by the
 * current thread and which are held by a different thread - they do no
 * per-thread tracking of held locks.
 *
 * The upper layer that is tracking held locks may however, if trylock() has
 * failed, count up its own read locks, subtract them, take the write lock, and
 * then re-add them.
 *
 * As in any other situation when taking a write lock, @lock must be held for
 * intent one (or more) times, so @lock will never be left unlocked.
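 *
 * Sketch of that sequence (illustrative; count_my_read_locks() and struct foo
 * are hypothetical caller-side code, and the per-type wrappers from six.h are
 * assumed):
 *
 *	unsigned nr = count_my_read_locks(foo);
 *
 *	six_lock_readers_add(&foo->lock, -(int) nr);
 *	six_lock_write(&foo->lock);
 *	...
 *	six_unlock_write(&foo->lock);
 *	six_lock_readers_add(&foo->lock, nr);
 */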
void six_lock_readers_add(struct six_lock *lock, int nr)
	this_cpu_add(*lock->readers, nr);

	EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0);
	/* reader count starts at bit 0 */
	atomic_add(nr, &lock->state);
EXPORT_SYMBOL_GPL(six_lock_readers_add);
/**
 * six_lock_exit - release resources held by a lock prior to freeing
 * @lock: lock to exit
 *
 * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
 * required to free the percpu read counts.
 */
void six_lock_exit(struct six_lock *lock)
	WARN_ON(lock->readers && pcpu_read_count(lock));
	WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read);

	free_percpu(lock->readers);
	lock->readers = NULL;
EXPORT_SYMBOL_GPL(six_lock_exit);

void __six_lock_init(struct six_lock *lock, const char *name,
		     struct lock_class_key *key, enum six_lock_init_flags flags)
	atomic_set(&lock->state, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
	/*
	 * Don't assume that we have real percpu variables available in
	 * userspace:
	 */
	if (flags & SIX_LOCK_INIT_PCPU) {
		/*
		 * We don't return an error here on memory allocation failure
		 * since percpu is an optimization, and locks will work with the
		 * same semantics in non-percpu mode: callers can check for
		 * failure if they wish by checking lock->readers, but generally
		 * will not want to treat it as an error.
		 */
		lock->readers = alloc_percpu(unsigned);
EXPORT_SYMBOL_GPL(__six_lock_init);