// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#ifdef DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)		do {} while (0)
#endif

#define six_acquire(l, t, r)	lock_acquire(l, 0, t, r, 1, NULL, _RET_IP_)
#define six_release(l)		lock_release(l, _RET_IP_)

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

struct six_lock_vals {
	/* Value we add to the lock in order to take the lock: */
	u64			lock_val;

	/* If the lock has this value (used as a mask), taking the lock fails: */
	u64			lock_fail;

	/* Value we add to the lock in order to release the lock: */
	u64			unlock_val;

	/* Mask that indicates lock is held for this type: */
	u64			held_mask;

	/* Waitlist we wakeup when releasing the lock: */
	enum six_lock_type	unlock_wakeup;
};

#define __SIX_LOCK_HELD_read	__SIX_VAL(read_lock, ~0)
#define __SIX_LOCK_HELD_intent	__SIX_VAL(intent_lock, ~0)
#define __SIX_LOCK_HELD_write	__SIX_VAL(seq, 1)

#define LOCK_VALS {							\
	[SIX_LOCK_read] = {						\
		.lock_val	= __SIX_VAL(read_lock, 1),		\
		.lock_fail	= __SIX_LOCK_HELD_write + __SIX_VAL(write_locking, 1),\
		.unlock_val	= -__SIX_VAL(read_lock, 1),		\
		.held_mask	= __SIX_LOCK_HELD_read,			\
		.unlock_wakeup	= SIX_LOCK_write,			\
	},								\
	[SIX_LOCK_intent] = {						\
		.lock_val	= __SIX_VAL(intent_lock, 1),		\
		.lock_fail	= __SIX_LOCK_HELD_intent,		\
		.unlock_val	= -__SIX_VAL(intent_lock, 1),		\
		.held_mask	= __SIX_LOCK_HELD_intent,		\
		.unlock_wakeup	= SIX_LOCK_intent,			\
	},								\
	[SIX_LOCK_write] = {						\
		.lock_val	= __SIX_VAL(seq, 1),			\
		.lock_fail	= __SIX_LOCK_HELD_read,			\
		.unlock_val	= __SIX_VAL(seq, 1),			\
		.held_mask	= __SIX_LOCK_HELD_write,		\
		.unlock_wakeup	= SIX_LOCK_read,			\
	},								\
}
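
/*
 * For example: taking a read lock adds 1 to the read_lock field, and fails if
 * a write lock is held or being taken (a held write lock is encoded as an odd
 * sequence number); taking a write lock bumps the sequence number and fails
 * if any read locks are held. Releasing adds unlock_val and wakes the
 * unlock_wakeup waitlist.
 */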

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
				 union six_lock_state old,
				 struct task_struct *owner)
{
	if (type != SIX_LOCK_intent)
		return;

	if (!old.intent_lock) {
		EBUG_ON(lock->owner);
		lock->owner = owner;
	} else {
		EBUG_ON(lock->owner != current);
	}
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
	unsigned read_count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		read_count += *per_cpu_ptr(lock->readers, cpu);
	return read_count;
}

/* This is probably up there with the more evil things I've done */
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))

static int __do_six_trylock_type(struct six_lock *lock,
				 enum six_lock_type type,
				 struct task_struct *task,
				 bool try)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state old, new;
	int ret;
	u64 v;

	EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
	EBUG_ON(type == SIX_LOCK_write && (lock->state.seq & 1));
	EBUG_ON(type == SIX_LOCK_write && (try != !(lock->state.write_locking)));

	/*
	 * Percpu reader mode:
	 *
	 * The basic idea behind this algorithm is that you can implement a lock
	 * between two threads without any atomics, just memory barriers:
	 *
	 * For two threads you'll need two variables, one variable for "thread a
	 * has the lock" and another for "thread b has the lock".
	 *
	 * To take the lock, a thread sets its variable indicating that it holds
	 * the lock, then issues a full memory barrier, then reads from the
	 * other thread's variable to check if the other thread thinks it has
	 * the lock. If we raced, we backoff and retry/sleep.
	 */
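	/*
	 * A rough sketch of that two-flag handshake (variable names here are
	 * purely illustrative; in six locks "the other thread's variable" is
	 * the lock word checked below):
	 *
	 *	thread a			thread b
	 *	WRITE_ONCE(a_locked, 1);	WRITE_ONCE(b_locked, 1);
	 *	smp_mb();			smp_mb();
	 *	if (READ_ONCE(b_locked))	if (READ_ONCE(a_locked))
	 *		back off and retry;		back off and retry;
	 */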

	if (type == SIX_LOCK_read && lock->readers) {
		preempt_disable();
		this_cpu_inc(*lock->readers); /* signal that we own lock */

		smp_mb();

		old.v = READ_ONCE(lock->state.v);
		ret = !(old.v & l[type].lock_fail);

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * If we failed because a writer was trying to take the
		 * lock, issue a wakeup because we might have caused a
		 * spurious trylock failure:
		 */
		if (old.write_locking)
			ret = -1 - SIX_LOCK_write;
	} else if (type == SIX_LOCK_write && lock->readers) {
		if (try) {
			atomic64_add(__SIX_VAL(write_locking, 1),
				     &lock->state.counter);
			smp_mb__after_atomic();
		} else if (!(lock->state.waiters & (1 << SIX_LOCK_write))) {
			atomic64_add(__SIX_VAL(waiters, 1 << SIX_LOCK_write),
				     &lock->state.counter);
			/*
			 * pairs with barrier after unlock and before checking
			 * for readers in unlock path
			 */
			smp_mb__after_atomic();
		}

		ret = !pcpu_read_count(lock);

		/*
		 * On success, we increment lock->seq; also we clear
		 * write_locking unless we failed from the lock path:
		 */
		v = 0;
		if (ret)
			v += __SIX_VAL(seq, 1);
		if (ret || try)
			v -= __SIX_VAL(write_locking, 1);

		if (try && !ret) {
			old.v = atomic64_add_return(v, &lock->state.counter);
			if (old.waiters & (1 << SIX_LOCK_read))
				ret = -1 - SIX_LOCK_read;
		} else {
			atomic64_add(v, &lock->state.counter);
		}
	} else {
		v = READ_ONCE(lock->state.v);
		do {
			new.v = old.v = v;

			if (!(old.v & l[type].lock_fail)) {
				new.v += l[type].lock_val;

				if (type == SIX_LOCK_write)
					new.write_locking = 0;
			} else if (!try && !(new.waiters & (1 << type)))
				new.waiters |= 1 << type;
			else
				break; /* waiting bit already set */
		} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
					old.v, new.v)) != old.v);

		ret = !(old.v & l[type].lock_fail);

		EBUG_ON(ret && !(lock->state.v & l[type].held_mask));
	}

	if (ret > 0)
		six_set_owner(lock, type, old, task);

	EBUG_ON(type == SIX_LOCK_write && (try || ret > 0) && (lock->state.write_locking));

	return ret;
}

static inline void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
{
	struct six_lock_waiter *w, *next;
	struct task_struct *task;
	bool saw_one;
	int ret;
again:
	ret = 0;
	saw_one = false;
	raw_spin_lock(&lock->wait_lock);

	list_for_each_entry_safe(w, next, &lock->wait_list, list) {
		if (w->lock_want != lock_type)
			continue;

		if (saw_one && lock_type != SIX_LOCK_read)
			goto unlock;
		saw_one = true;

		ret = __do_six_trylock_type(lock, lock_type, w->task, false);
		if (ret <= 0)
			goto unlock;

		__list_del(w->list.prev, w->list.next);
		task = w->task;
		/*
		 * Do no writes to @w besides setting lock_acquired - otherwise
		 * we would need a memory barrier:
		 */
		barrier();
		w->lock_acquired = true;
		wake_up_process(task);
	}

	clear_bit(waitlist_bitnr(lock_type), (unsigned long *) &lock->state.v);
unlock:
	raw_spin_unlock(&lock->wait_lock);

	if (ret < 0) {
		lock_type = -ret - 1;
		goto again;
	}
}

static inline void six_lock_wakeup(struct six_lock *lock,
				   union six_lock_state state,
				   enum six_lock_type lock_type)
{
	if (lock_type == SIX_LOCK_write && state.read_lock)
		return;

	if (!(state.waiters & (1 << lock_type)))
		return;

	__six_lock_wakeup(lock, lock_type);
}

static bool do_six_trylock_type(struct six_lock *lock,
				enum six_lock_type type,
				bool try)
{
	int ret;

	ret = __do_six_trylock_type(lock, type, current, try);
	if (ret < 0)
		__six_lock_wakeup(lock, -ret - 1);

	return ret > 0;
}

__always_inline __flatten
static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
	if (!do_six_trylock_type(lock, type, true))
		return false;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
	return true;
}

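/*
 * Relock: take the lock again, but only if lock->state.seq still matches
 * @seq, i.e. no write lock has been taken (and released) since we last held
 * this lock:
 */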
__always_inline __flatten
static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
			      unsigned seq)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state old;
	u64 v;

	EBUG_ON(type == SIX_LOCK_write);

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		bool ret;

		preempt_disable();
		this_cpu_inc(*lock->readers);

		smp_mb();

		old.v = READ_ONCE(lock->state.v);
		ret = !(old.v & l[type].lock_fail) && old.seq == seq;

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * Similar to the lock path, we may have caused a spurious write
		 * lock fail and need to issue a wakeup:
		 */
		if (old.write_locking)
			six_lock_wakeup(lock, old, SIX_LOCK_write);

		if (ret)
			six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);

		return ret;
	}

	v = READ_ONCE(lock->state.v);
	do {
		old.v = v;

		if (old.seq != seq || old.v & l[type].lock_fail)
			return false;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
				old.v,
				old.v + l[type].lock_val)) != old.v);

	six_set_owner(lock, type, old, current);
	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
	return true;
}

#ifdef CONFIG_LOCK_SPIN_ON_OWNER

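/*
 * Spin for a while instead of sleeping, as long as the current lock owner is
 * still running on a CPU - the same idea as optimistic spinning in mutexes
 * and rwsems:
 */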
static inline bool six_optimistic_spin(struct six_lock *lock,
				       struct six_lock_waiter *wait)
{
	struct task_struct *owner, *task = current;

	switch (wait->lock_want) {
	case SIX_LOCK_read:
		break;
	case SIX_LOCK_intent:
		if (lock->wait_list.next != &wait->list)
			return false;
		break;
	case SIX_LOCK_write:
		return false;
	}

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);

	while (owner && lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Stop spinning if we got the lock, if the owner went to
		 * sleep, if we're an RT task (we would live-lock because we
		 * won't let the owner complete), or if we need to reschedule:
		 */
		if (wait->lock_acquired ||
		    !owner->on_cpu ||
		    rt_task(task) ||
		    need_resched())
			break;

		cpu_relax();
	}
	rcu_read_unlock();

	return wait->lock_acquired;
}

#else /* CONFIG_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock,
				       struct six_lock_waiter *wait)
{
	return false;
}

#endif

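/*
 * Slowpath: add ourselves to the waitlist, retry the trylock under the
 * waitlist lock, optionally spin on the owner, then sleep until the unlock
 * path hands us the lock (lock_acquired) or should_sleep_fn tells us to
 * bail out:
 */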
noinline
static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
				    struct six_lock_waiter *wait,
				    six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	union six_lock_state old;
	int ret = 0;

	if (type == SIX_LOCK_write) {
		EBUG_ON(lock->state.write_locking);
		atomic64_add(__SIX_VAL(write_locking, 1), &lock->state.counter);
		smp_mb__after_atomic();
	}

	lock_contended(&lock->dep_map, _RET_IP_);

	wait->task		= current;
	wait->lock_want		= type;
	wait->lock_acquired	= false;

	raw_spin_lock(&lock->wait_lock);
	if (!(lock->state.waiters & (1 << type)))
		set_bit(waitlist_bitnr(type), (unsigned long *) &lock->state.v);
	/*
	 * Retry taking the lock after taking the waitlist lock; we may have
	 * raced with an unlock:
	 */
	ret = __do_six_trylock_type(lock, type, current, false);
	if (ret <= 0) {
		wait->start_time = local_clock();

		if (!list_empty(&lock->wait_list)) {
			struct six_lock_waiter *last =
				list_last_entry(&lock->wait_list,
					struct six_lock_waiter, list);

			if (time_before_eq64(wait->start_time, last->start_time))
				wait->start_time = last->start_time + 1;
		}

		list_add_tail(&wait->list, &lock->wait_list);
	}
	raw_spin_unlock(&lock->wait_lock);

	if (unlikely(ret > 0)) {
		ret = 0;
		goto out;
	}

	if (unlikely(ret < 0)) {
		__six_lock_wakeup(lock, -ret - 1);
		ret = 0;
	}

	if (six_optimistic_spin(lock, wait))
		goto out;

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (wait->lock_acquired)
			break;

		ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
		if (unlikely(ret)) {
			raw_spin_lock(&lock->wait_lock);
			if (!wait->lock_acquired)
				list_del(&wait->list);
			raw_spin_unlock(&lock->wait_lock);

			if (wait->lock_acquired)
				do_six_unlock_type(lock, type);
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
out:
	if (ret && type == SIX_LOCK_write && lock->state.write_locking) {
		old.v = atomic64_sub_return(__SIX_VAL(write_locking, 1),
					    &lock->state.counter);
		six_lock_wakeup(lock, old, SIX_LOCK_read);
	}

	return ret;
}

__always_inline __flatten
static int __six_lock_type_waiter(struct six_lock *lock, enum six_lock_type type,
			 struct six_lock_waiter *wait,
			 six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	int ret;

	wait->start_time = 0;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

	ret = do_six_trylock_type(lock, type, true) ? 0
		: __six_lock_type_slowpath(lock, type, wait, should_sleep_fn, p);

	if (ret && type != SIX_LOCK_write)
		six_release(&lock->dep_map);
	if (!ret)
		lock_acquired(&lock->dep_map, _RET_IP_);

	return ret;
}

__always_inline
static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
			   six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	struct six_lock_waiter wait;

	return __six_lock_type_waiter(lock, type, &wait, should_sleep_fn, p);
}

__always_inline __flatten
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state state;

	if (type == SIX_LOCK_intent)
		lock->owner = NULL;

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		smp_mb(); /* unlock barrier */
		this_cpu_dec(*lock->readers);
		smp_mb(); /* between unlocking and checking for waiters */
		state.v = READ_ONCE(lock->state.v);
	} else {
		EBUG_ON(!(lock->state.v & l[type].held_mask));
		state.v = atomic64_add_return_release(l[type].unlock_val,
						      &lock->state.counter);
	}

	six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}

__always_inline __flatten
static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	EBUG_ON(type == SIX_LOCK_write &&
		!(lock->state.v & __SIX_LOCK_HELD_intent));
	EBUG_ON((type == SIX_LOCK_write ||
		 type == SIX_LOCK_intent) &&
		lock->owner != current);

	if (type != SIX_LOCK_write)
		six_release(&lock->dep_map);

	if (type == SIX_LOCK_intent &&
	    lock->intent_lock_recurse) {
		--lock->intent_lock_recurse;
		return;
	}

	do_six_unlock_type(lock, type);
}

#define __SIX_LOCK(type)						\
bool six_trylock_##type(struct six_lock *lock)				\
{									\
	return __six_trylock_type(lock, SIX_LOCK_##type);		\
}									\
EXPORT_SYMBOL_GPL(six_trylock_##type);					\
									\
bool six_relock_##type(struct six_lock *lock, u32 seq)			\
{									\
	return __six_relock_type(lock, SIX_LOCK_##type, seq);		\
}									\
EXPORT_SYMBOL_GPL(six_relock_##type);					\
									\
int six_lock_##type(struct six_lock *lock,				\
		    six_lock_should_sleep_fn should_sleep_fn, void *p)	\
{									\
	return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
}									\
EXPORT_SYMBOL_GPL(six_lock_##type);					\
									\
int six_lock_waiter_##type(struct six_lock *lock,			\
			   struct six_lock_waiter *wait,		\
			   six_lock_should_sleep_fn should_sleep_fn, void *p)\
{									\
	return __six_lock_type_waiter(lock, SIX_LOCK_##type, wait, should_sleep_fn, p);\
}									\
EXPORT_SYMBOL_GPL(six_lock_waiter_##type);				\
									\
void six_unlock_##type(struct six_lock *lock)				\
{									\
	__six_unlock_type(lock, SIX_LOCK_##type);			\
}									\
EXPORT_SYMBOL_GPL(six_unlock_##type);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)

#undef __SIX_LOCK
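
/*
 * A minimal usage sketch of the generated API (illustrative only; the caller,
 * variable names and error handling here are hypothetical):
 *
 *	struct six_lock *lock = ...;
 *	unsigned seq;
 *
 *	six_lock_intent(lock, NULL, NULL);	// other readers still allowed
 *	six_lock_write(lock, NULL, NULL);	// excludes readers; requires intent
 *	// ... modify the protected structure ...
 *	six_unlock_write(lock);
 *	seq = lock->state.seq;
 *	six_unlock_intent(lock);
 *
 *	// Later, retake the lock only if nothing was written in between:
 *	if (!six_relock_read(lock, seq))
 *		// fall back to six_lock_read() and revalidate
 */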

/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
	six_lock_increment(lock, SIX_LOCK_read);
	six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);

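/*
 * Convert a read lock to an intent lock - fails (without dropping the read
 * lock) if an intent lock is already held:
 */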
bool six_lock_tryupgrade(struct six_lock *lock)
{
	union six_lock_state old, new;
	u64 v = READ_ONCE(lock->state.v);

	do {
		new.v = old.v = v;

		if (new.intent_lock)
			return false;

		if (!lock->readers) {
			EBUG_ON(!new.read_lock);
			new.read_lock--;
		}

		new.intent_lock = 1;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
				old.v, new.v)) != old.v);

	if (lock->readers)
		this_cpu_dec(*lock->readers);

	six_set_owner(lock, SIX_LOCK_intent, old, current);

	return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
			 enum six_lock_type from,
			 enum six_lock_type to)
{
	EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

	if (to == from)
		return true;

	if (to == SIX_LOCK_read) {
		six_lock_downgrade(lock);
		return true;
	} else {
		return six_lock_tryupgrade(lock);
	}
}
EXPORT_SYMBOL_GPL(six_trylock_convert);

/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;

	six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

	/* XXX: assert already locked, and that we don't overflow: */

	switch (type) {
	case SIX_LOCK_read:
		if (lock->readers) {
			this_cpu_inc(*lock->readers);
		} else {
			EBUG_ON(!lock->state.read_lock &&
				!lock->state.intent_lock);
			atomic64_add(l[type].lock_val, &lock->state.counter);
		}
		break;
	case SIX_LOCK_intent:
		EBUG_ON(!lock->state.intent_lock);
		lock->intent_lock_recurse++;
		break;
	case SIX_LOCK_write:
		BUG();
		break;
	}
}
EXPORT_SYMBOL_GPL(six_lock_increment);

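/*
 * Wake up all waiters on the lock, whether or not they can take the lock yet -
 * each woken waiter will re-check its should_sleep_fn:
 */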
void six_lock_wakeup_all(struct six_lock *lock)
{
	union six_lock_state state = lock->state;
	struct six_lock_waiter *w;

	six_lock_wakeup(lock, state, SIX_LOCK_read);
	six_lock_wakeup(lock, state, SIX_LOCK_intent);
	six_lock_wakeup(lock, state, SIX_LOCK_write);

	raw_spin_lock(&lock->wait_lock);
	list_for_each_entry(w, &lock->wait_list, list)
		wake_up_process(w->task);
	raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

void six_lock_pcpu_free(struct six_lock *lock)
{
	BUG_ON(lock->readers && pcpu_read_count(lock));
	BUG_ON(lock->state.read_lock);

	free_percpu(lock->readers);
	lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free);

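/*
 * Switch the lock to percpu reader mode: read locks are then counted in a
 * percpu counter instead of in the lock word itself.
 */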
void six_lock_pcpu_alloc(struct six_lock *lock)
{
#ifdef __KERNEL__
	if (!lock->readers)
		lock->readers = alloc_percpu(unsigned);
#endif
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);

/*
 * Return the number of times the lock is currently held for read, intent and
 * write:
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
{
	struct six_lock_count ret;

	ret.n[SIX_LOCK_read]	= 0;
	ret.n[SIX_LOCK_intent]	= lock->state.intent_lock + lock->intent_lock_recurse;
	ret.n[SIX_LOCK_write]	= lock->state.seq & 1;

	if (!lock->readers)
		ret.n[SIX_LOCK_read] += lock->state.read_lock;
	else {
		int cpu;

		for_each_possible_cpu(cpu)
			ret.n[SIX_LOCK_read] += *per_cpu_ptr(lock->readers, cpu);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(six_lock_counts);