// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#ifdef DEBUG
#define EBUG_ON(cond)           BUG_ON(cond)
#else
#define EBUG_ON(cond)           do {} while (0)
#endif

#define six_acquire(l, t, r)    lock_acquire(l, 0, t, r, 1, NULL, _RET_IP_)
#define six_release(l)          lock_release(l, _RET_IP_)

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

struct six_lock_vals {
        /* Value we add to the lock in order to take the lock: */
        u64                     lock_val;

        /* If the lock has this value (used as a mask), taking the lock fails: */
        u64                     lock_fail;

        /* Value we add to the lock in order to release the lock: */
        u64                     unlock_val;

        /* Mask that indicates lock is held for this type: */
        u64                     held_mask;

        /* Waitlist we wakeup when releasing the lock: */
        enum six_lock_type      unlock_wakeup;
};

#define __SIX_LOCK_HELD_read    __SIX_VAL(read_lock, ~0)
#define __SIX_LOCK_HELD_intent  __SIX_VAL(intent_lock, ~0)
#define __SIX_LOCK_HELD_write   __SIX_VAL(seq, 1)

#define LOCK_VALS {                                                     \
        [SIX_LOCK_read] = {                                             \
                .lock_val       = __SIX_VAL(read_lock, 1),              \
                .lock_fail      = __SIX_LOCK_HELD_write + __SIX_VAL(write_locking, 1),\
                .unlock_val     = -__SIX_VAL(read_lock, 1),             \
                .held_mask      = __SIX_LOCK_HELD_read,                 \
                .unlock_wakeup  = SIX_LOCK_write,                       \
        },                                                              \
        [SIX_LOCK_intent] = {                                           \
                .lock_val       = __SIX_VAL(intent_lock, 1),            \
                .lock_fail      = __SIX_LOCK_HELD_intent,               \
                .unlock_val     = -__SIX_VAL(intent_lock, 1),           \
                .held_mask      = __SIX_LOCK_HELD_intent,               \
                .unlock_wakeup  = SIX_LOCK_intent,                      \
        },                                                              \
        [SIX_LOCK_write] = {                                            \
                .lock_val       = __SIX_VAL(seq, 1),                    \
                .lock_fail      = __SIX_LOCK_HELD_read,                 \
                .unlock_val     = __SIX_VAL(seq, 1),                    \
                .held_mask      = __SIX_LOCK_HELD_write,                \
                .unlock_wakeup  = SIX_LOCK_read,                        \
        },                                                              \
}
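
/*
 * In words (summarizing the table above, not adding new rules): read fails
 * while write is held or a writer is in the middle of taking the lock;
 * intent fails while another intent is held (read and intent can coexist);
 * write fails while readers are held. Taking or releasing the write lock
 * adds 1 to the sequence number, so the low bit of seq doubles as the
 * "write held" flag (__SIX_LOCK_HELD_write).
 */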

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
                                 union six_lock_state old,
                                 struct task_struct *owner)
{
        if (type != SIX_LOCK_intent)
                return;

        if (!old.intent_lock) {
                EBUG_ON(lock->owner);
                lock->owner = owner;
        } else {
                EBUG_ON(lock->owner != current);
        }
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
        unsigned read_count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                read_count += *per_cpu_ptr(lock->readers, cpu);
        return read_count;
}

/*
 * This is probably up there with the more evil things I've done:
 * waitlist_bitnr(id) builds a state word whose only set bit is waiter bit
 * @id and takes ilog2() of it - i.e. it maps a lock type to the absolute
 * bit number of its waiter flag within the 64-bit state word, suitable for
 * set_bit()/clear_bit() on &lock->state.v.
 */
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))

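/*
 * Return convention (inferred from the callers below): > 0 means the lock was
 * taken, 0 means it wasn't, and a negative value -1 - lock_type means the
 * attempt failed but may have caused a spurious failure for waiters of
 * lock_type, which the caller must wake via __six_lock_wakeup().
 */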
static int __do_six_trylock_type(struct six_lock *lock,
                                 enum six_lock_type type,
                                 struct task_struct *task,
                                 bool try)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old, new;
        int ret;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
        EBUG_ON(type == SIX_LOCK_write && (lock->state.seq & 1));
        EBUG_ON(type == SIX_LOCK_write && (try != !(lock->state.write_locking)));

        /*
         * Percpu reader mode:
         *
         * The basic idea behind this algorithm is that you can implement a lock
         * between two threads without any atomics, just memory barriers:
         *
         * For two threads you'll need two variables, one variable for "thread a
         * has the lock" and another for "thread b has the lock".
         *
         * To take the lock, a thread sets its variable indicating that it holds
         * the lock, then issues a full memory barrier, then reads from the
         * other thread's variable to check if the other thread thinks it has
         * the lock. If we raced, we back off and retry/sleep.
         */
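
        /*
         * A minimal sketch of that two-thread idea, purely illustrative (the
         * flag_a/flag_b names are hypothetical, not part of this file):
         *
         *      bool flag_a, flag_b;
         *
         *      // thread a, trylock:
         *      WRITE_ONCE(flag_a, true);          // "I hold the lock"
         *      smp_mb();                          // publish before checking the peer
         *      if (READ_ONCE(flag_b))             // raced with thread b?
         *              WRITE_ONCE(flag_a, false); // back off, then retry or sleep
         *
         * Below, the per-cpu counters in lock->readers play the role of the
         * per-thread flags, and write_locking in lock->state is the other
         * side that writers set before checking for readers.
         */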

        if (type == SIX_LOCK_read && lock->readers) {
                preempt_disable();
                this_cpu_inc(*lock->readers); /* signal that we own lock */

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail);

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * If we failed because a writer was trying to take the
                 * lock, issue a wakeup because we might have caused a
                 * spurious trylock failure:
                 */
                if (old.write_locking)
                        ret = -1 - SIX_LOCK_write;
        } else if (type == SIX_LOCK_write && lock->readers) {
                if (try) {
                        atomic64_add(__SIX_VAL(write_locking, 1),
                                     &lock->state.counter);
                        smp_mb__after_atomic();
                } else if (!(lock->state.waiters & (1 << SIX_LOCK_write))) {
                        atomic64_add(__SIX_VAL(waiters, 1 << SIX_LOCK_write),
                                     &lock->state.counter);
                        /*
                         * pairs with barrier after unlock and before checking
                         * for readers in unlock path
                         */
                        smp_mb__after_atomic();
                }

                ret = !pcpu_read_count(lock);

                /*
                 * On success we increment lock->seq; we also clear
                 * write_locking, unless this is the non-try (slowpath) lock
                 * path and we failed - then write_locking stays set so new
                 * readers keep backing off until we get the lock:
                 */
                v = 0;
                if (ret)
                        v += __SIX_VAL(seq, 1);
                if (ret || try)
                        v -= __SIX_VAL(write_locking, 1);

                if (try && !ret) {
                        old.v = atomic64_add_return(v, &lock->state.counter);
                        if (old.waiters & (1 << SIX_LOCK_read))
                                ret = -1 - SIX_LOCK_read;
                } else {
                        atomic64_add(v, &lock->state.counter);
                }
        } else {
                v = READ_ONCE(lock->state.v);
                do {
                        new.v = old.v = v;

                        if (!(old.v & l[type].lock_fail)) {
                                new.v += l[type].lock_val;

                                if (type == SIX_LOCK_write)
                                        new.write_locking = 0;
                        } else if (!try && !(new.waiters & (1 << type)))
                                new.waiters |= 1 << type;
                        else
                                break; /* waiting bit already set */
                } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                        old.v, new.v)) != old.v);

                ret = !(old.v & l[type].lock_fail);

                EBUG_ON(ret && !(lock->state.v & l[type].held_mask));
        }

        if (ret > 0)
                six_set_owner(lock, type, old, task);

        EBUG_ON(type == SIX_LOCK_write && (try || ret > 0) && (lock->state.write_locking));

        return ret;
}

static inline void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
{
        struct six_lock_waiter *w, *next;
        struct task_struct *task;
        bool saw_one;
        int ret;
again:
        ret = 0;
        saw_one = false;
        raw_spin_lock(&lock->wait_lock);

        list_for_each_entry_safe(w, next, &lock->wait_list, list) {
                if (w->lock_want != lock_type)
                        continue;

                if (saw_one && lock_type != SIX_LOCK_read)
                        goto unlock;
                saw_one = true;

                ret = __do_six_trylock_type(lock, lock_type, w->task, false);
                if (ret <= 0)
                        goto unlock;

                __list_del(w->list.prev, w->list.next);
                task = w->task;
                /*
                 * Do no writes to @w besides setting lock_acquired - otherwise
                 * we would need a memory barrier:
                 */
                barrier();
                w->lock_acquired = true;
                wake_up_process(task);
        }

        clear_bit(waitlist_bitnr(lock_type), (unsigned long *) &lock->state.v);
unlock:
        raw_spin_unlock(&lock->wait_lock);

        if (ret < 0) {
                lock_type = -ret - 1;
                goto again;
        }
}

static inline void six_lock_wakeup(struct six_lock *lock,
                                   union six_lock_state state,
                                   enum six_lock_type lock_type)
{
        if (lock_type == SIX_LOCK_write && state.read_lock)
                return;

        if (!(state.waiters & (1 << lock_type)))
                return;

        __six_lock_wakeup(lock, lock_type);
}

static bool do_six_trylock_type(struct six_lock *lock,
                                enum six_lock_type type,
                                bool try)
{
        int ret;

        ret = __do_six_trylock_type(lock, type, current, try);
        if (ret < 0)
                __six_lock_wakeup(lock, -ret - 1);

        return ret > 0;
}

__always_inline __flatten
static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
        if (!do_six_trylock_type(lock, type, true))
                return false;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
        return true;
}

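/*
 * Relock: re-take a lock that was previously held at sequence number @seq.
 * This succeeds only if the lock can be taken now and lock->state.seq still
 * equals @seq, i.e. the write lock has not been taken since the caller last
 * held the lock.
 */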
__always_inline __flatten
static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
                              unsigned seq)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write);

        if (type == SIX_LOCK_read &&
            lock->readers) {
                bool ret;

                preempt_disable();
                this_cpu_inc(*lock->readers);

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail) && old.seq == seq;

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * Similar to the lock path, we may have caused a spurious write
                 * lock fail and need to issue a wakeup:
                 */
                if (old.write_locking)
                        six_lock_wakeup(lock, old, SIX_LOCK_write);

                if (ret)
                        six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);

                return ret;
        }

        v = READ_ONCE(lock->state.v);
        do {
                old.v = v;

                if (old.seq != seq || old.v & l[type].lock_fail)
                        return false;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v,
                                old.v + l[type].lock_val)) != old.v);

        six_set_owner(lock, type, old, current);
        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
        return true;
}

/*
 * We don't see stable performance with SIX_LOCK_SPIN_ON_OWNER enabled, so it's
 * off for now:
 */
#ifdef SIX_LOCK_SPIN_ON_OWNER

static inline bool six_optimistic_spin(struct six_lock *lock,
                                       struct six_lock_waiter *wait)
{
        struct task_struct *owner, *task = current;

        switch (wait->lock_want) {
        case SIX_LOCK_read:
                break;
        case SIX_LOCK_intent:
                if (lock->wait_list.next != &wait->list)
                        return false;
                break;
        case SIX_LOCK_write:
                return false;
        }

        rcu_read_lock();
        owner = READ_ONCE(lock->owner);

        while (owner && lock->owner == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking lock->owner still matches owner. If that fails,
                 * owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /*
                 * Stop spinning once we hold the lock, if the owner has been
                 * scheduled out, if we're an RT task (we'd live-lock, since
                 * we'd never let the owner run and complete), or if we need
                 * to reschedule:
                 */
                if (wait->lock_acquired ||
                    !owner->on_cpu ||
                    rt_task(task) ||
                    need_resched())
                        break;

                cpu_relax();
        }
        rcu_read_unlock();

        return wait->lock_acquired;
}

#else /* SIX_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock,
                                       struct six_lock_waiter *wait)
{
        return false;
}

#endif

noinline
static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
                                    struct six_lock_waiter *wait,
                                    six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        union six_lock_state old;
        int ret = 0;

        if (type == SIX_LOCK_write) {
                EBUG_ON(lock->state.write_locking);
                atomic64_add(__SIX_VAL(write_locking, 1), &lock->state.counter);
                smp_mb__after_atomic();
        }

        lock_contended(&lock->dep_map, _RET_IP_);

        wait->task              = current;
        wait->lock_want         = type;
        wait->lock_acquired     = false;

        raw_spin_lock(&lock->wait_lock);
        if (!(lock->state.waiters & (1 << type)))
                set_bit(waitlist_bitnr(type), (unsigned long *) &lock->state.v);
        /*
         * Retry taking the lock after taking the waitlist lock, in case we
         * raced with an unlock:
         */
        ret = __do_six_trylock_type(lock, type, current, false);
        if (ret <= 0) {
                wait->start_time = local_clock();

                if (!list_empty(&lock->wait_list)) {
                        struct six_lock_waiter *last =
                                list_last_entry(&lock->wait_list,
                                        struct six_lock_waiter, list);

                        if (time_before_eq64(wait->start_time, last->start_time))
                                wait->start_time = last->start_time + 1;
                }

                list_add_tail(&wait->list, &lock->wait_list);
        }
        raw_spin_unlock(&lock->wait_lock);

        if (unlikely(ret > 0)) {
                ret = 0;
                goto out;
        }

        if (unlikely(ret < 0)) {
                __six_lock_wakeup(lock, -ret - 1);
                ret = 0;
        }

        if (six_optimistic_spin(lock, wait))
                goto out;

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (wait->lock_acquired)
                        break;

                ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
                if (unlikely(ret)) {
                        raw_spin_lock(&lock->wait_lock);
                        if (!wait->lock_acquired)
                                list_del(&wait->list);
                        raw_spin_unlock(&lock->wait_lock);

                        if (wait->lock_acquired)
                                do_six_unlock_type(lock, type);
                        break;
                }

                schedule();
        }

        __set_current_state(TASK_RUNNING);
out:
        if (ret && type == SIX_LOCK_write && lock->state.write_locking) {
                old.v = atomic64_sub_return(__SIX_VAL(write_locking, 1),
                                            &lock->state.counter);
                six_lock_wakeup(lock, old, SIX_LOCK_read);
        }

        return ret;
}

__always_inline __flatten
static int __six_lock_type_waiter(struct six_lock *lock, enum six_lock_type type,
                         struct six_lock_waiter *wait,
                         six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        int ret;

        wait->start_time = 0;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

        ret = do_six_trylock_type(lock, type, true) ? 0
                : __six_lock_type_slowpath(lock, type, wait, should_sleep_fn, p);

        if (ret && type != SIX_LOCK_write)
                six_release(&lock->dep_map);
        if (!ret)
                lock_acquired(&lock->dep_map, _RET_IP_);

        return ret;
}

__always_inline
static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
                           six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        struct six_lock_waiter wait;

        return __six_lock_type_waiter(lock, type, &wait, should_sleep_fn, p);
}

__always_inline __flatten
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state state;

        if (type == SIX_LOCK_intent)
                lock->owner = NULL;

        if (type == SIX_LOCK_read &&
            lock->readers) {
                smp_mb(); /* unlock barrier */
                this_cpu_dec(*lock->readers);
                smp_mb(); /* between unlocking and checking for waiters */
                state.v = READ_ONCE(lock->state.v);
        } else {
                EBUG_ON(!(lock->state.v & l[type].held_mask));
                state.v = atomic64_add_return_release(l[type].unlock_val,
                                                      &lock->state.counter);
        }

        six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}

__always_inline __flatten
static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
        EBUG_ON(type == SIX_LOCK_write &&
                !(lock->state.v & __SIX_LOCK_HELD_intent));
        EBUG_ON((type == SIX_LOCK_write ||
                 type == SIX_LOCK_intent) &&
                lock->owner != current);

        if (type != SIX_LOCK_write)
                six_release(&lock->dep_map);

        if (type == SIX_LOCK_intent &&
            lock->intent_lock_recurse) {
                --lock->intent_lock_recurse;
                return;
        }

        do_six_unlock_type(lock, type);
}

#define __SIX_LOCK(type)                                                \
bool six_trylock_##type(struct six_lock *lock)                          \
{                                                                       \
        return __six_trylock_type(lock, SIX_LOCK_##type);               \
}                                                                       \
EXPORT_SYMBOL_GPL(six_trylock_##type);                                  \
                                                                        \
bool six_relock_##type(struct six_lock *lock, u32 seq)                  \
{                                                                       \
        return __six_relock_type(lock, SIX_LOCK_##type, seq);           \
}                                                                       \
EXPORT_SYMBOL_GPL(six_relock_##type);                                   \
                                                                        \
int six_lock_##type(struct six_lock *lock,                              \
                    six_lock_should_sleep_fn should_sleep_fn, void *p)  \
{                                                                       \
        return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
}                                                                       \
EXPORT_SYMBOL_GPL(six_lock_##type);                                     \
                                                                        \
int six_lock_waiter_##type(struct six_lock *lock,                       \
                           struct six_lock_waiter *wait,                \
                           six_lock_should_sleep_fn should_sleep_fn, void *p)\
{                                                                       \
        return __six_lock_type_waiter(lock, SIX_LOCK_##type, wait, should_sleep_fn, p);\
}                                                                       \
EXPORT_SYMBOL_GPL(six_lock_waiter_##type);                              \
                                                                        \
void six_unlock_##type(struct six_lock *lock)                           \
{                                                                       \
        __six_unlock_type(lock, SIX_LOCK_##type);                       \
}                                                                       \
EXPORT_SYMBOL_GPL(six_unlock_##type);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)

#undef __SIX_LOCK
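
/*
 * Minimal usage sketch of the generated API (illustrative only; assumes
 * six_lock_init() from six.h, and passes NULL for should_sleep_fn/p):
 *
 *      struct six_lock foo_lock;
 *
 *      six_lock_init(&foo_lock);
 *      six_lock_intent(&foo_lock, NULL, NULL);  // intent: excludes other intents
 *      six_lock_write(&foo_lock, NULL, NULL);   // write: must already hold intent
 *      six_unlock_write(&foo_lock);
 *      six_unlock_intent(&foo_lock);
 */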

/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
        six_lock_increment(lock, SIX_LOCK_read);
        six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);

bool six_lock_tryupgrade(struct six_lock *lock)
{
        union six_lock_state old, new;
        u64 v = READ_ONCE(lock->state.v);

        do {
                new.v = old.v = v;

                if (new.intent_lock)
                        return false;

                if (!lock->readers) {
                        EBUG_ON(!new.read_lock);
                        new.read_lock--;
                }

                new.intent_lock = 1;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v, new.v)) != old.v);

        if (lock->readers)
                this_cpu_dec(*lock->readers);

        six_set_owner(lock, SIX_LOCK_intent, old, current);

        return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
                         enum six_lock_type from,
                         enum six_lock_type to)
{
        EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

        if (to == from)
                return true;

        if (to == SIX_LOCK_read) {
                six_lock_downgrade(lock);
                return true;
        } else {
                return six_lock_tryupgrade(lock);
        }
}
EXPORT_SYMBOL_GPL(six_trylock_convert);

/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;

        six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

        /* XXX: assert already locked, and that we don't overflow: */

        switch (type) {
        case SIX_LOCK_read:
                if (lock->readers) {
                        this_cpu_inc(*lock->readers);
                } else {
                        EBUG_ON(!lock->state.read_lock &&
                                !lock->state.intent_lock);
                        atomic64_add(l[type].lock_val, &lock->state.counter);
                }
                break;
        case SIX_LOCK_intent:
                EBUG_ON(!lock->state.intent_lock);
                lock->intent_lock_recurse++;
                break;
        case SIX_LOCK_write:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(six_lock_increment);

void six_lock_wakeup_all(struct six_lock *lock)
{
        union six_lock_state state = lock->state;
        struct six_lock_waiter *w;

        six_lock_wakeup(lock, state, SIX_LOCK_read);
        six_lock_wakeup(lock, state, SIX_LOCK_intent);
        six_lock_wakeup(lock, state, SIX_LOCK_write);

        raw_spin_lock(&lock->wait_lock);
        list_for_each_entry(w, &lock->wait_list, list)
                wake_up_process(w->task);
        raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

void six_lock_pcpu_free(struct six_lock *lock)
{
        BUG_ON(lock->readers && pcpu_read_count(lock));
        BUG_ON(lock->state.read_lock);

        free_percpu(lock->readers);
        lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free);

void six_lock_pcpu_alloc(struct six_lock *lock)
{
#ifdef __KERNEL__
        if (!lock->readers)
                lock->readers = alloc_percpu(unsigned);
#endif
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);

/*
 * Returns lock held counts, for read, intent and write:
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
{
        struct six_lock_count ret;

        ret.n[SIX_LOCK_read]    = 0;
        ret.n[SIX_LOCK_intent]  = lock->state.intent_lock + lock->intent_lock_recurse;
        ret.n[SIX_LOCK_write]   = lock->state.seq & 1;

        if (!lock->readers)
                ret.n[SIX_LOCK_read] += lock->state.read_lock;
        else {
                int cpu;

                for_each_possible_cpu(cpu)
                        ret.n[SIX_LOCK_read] += *per_cpu_ptr(lock->readers, cpu);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(six_lock_counts);
757 EXPORT_SYMBOL_GPL(six_lock_counts);