// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#ifdef DEBUG
#define EBUG_ON(cond)           BUG_ON(cond)
#else
#define EBUG_ON(cond)           do {} while (0)
#endif

#define six_acquire(l, t, r)    lock_acquire(l, 0, t, r, 1, NULL, _RET_IP_)
#define six_release(l)          lock_release(l, _RET_IP_)

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

struct six_lock_vals {
        /* Value we add to the lock in order to take the lock: */
        u64                     lock_val;

        /* If any of these bits are set in the lock (used as a mask), taking the lock fails: */
        u64                     lock_fail;

        /* Value we add to the lock in order to release the lock: */
        u64                     unlock_val;

        /* Mask that indicates lock is held for this type: */
        u64                     held_mask;

        /* Waitlist we wake up when releasing the lock: */
        enum six_lock_type      unlock_wakeup;
};

#define __SIX_LOCK_HELD_read    __SIX_VAL(read_lock, ~0)
#define __SIX_LOCK_HELD_intent  __SIX_VAL(intent_lock, ~0)
#define __SIX_LOCK_HELD_write   __SIX_VAL(seq, 1)

#define LOCK_VALS {                                                     \
        [SIX_LOCK_read] = {                                             \
                .lock_val       = __SIX_VAL(read_lock, 1),              \
                .lock_fail      = __SIX_LOCK_HELD_write + __SIX_VAL(write_locking, 1),\
                .unlock_val     = -__SIX_VAL(read_lock, 1),             \
                .held_mask      = __SIX_LOCK_HELD_read,                 \
                .unlock_wakeup  = SIX_LOCK_write,                       \
        },                                                              \
        [SIX_LOCK_intent] = {                                           \
                .lock_val       = __SIX_VAL(intent_lock, 1),            \
                .lock_fail      = __SIX_LOCK_HELD_intent,               \
                .unlock_val     = -__SIX_VAL(intent_lock, 1),           \
                .held_mask      = __SIX_LOCK_HELD_intent,               \
                .unlock_wakeup  = SIX_LOCK_intent,                      \
        },                                                              \
        [SIX_LOCK_write] = {                                            \
                .lock_val       = __SIX_VAL(seq, 1),                    \
                .lock_fail      = __SIX_LOCK_HELD_read,                 \
                .unlock_val     = __SIX_VAL(seq, 1),                    \
                .held_mask      = __SIX_LOCK_HELD_write,                \
                .unlock_wakeup  = SIX_LOCK_read,                        \
        },                                                              \
}
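
/*
 * Example: how the LOCK_VALS table drives the generic (non-percpu) paths
 * below. Roughly, a trylock of @type does
 *
 *      old = atomic64_read(&lock->state.counter);
 *      if (old & l[type].lock_fail)
 *              return false;           // conflicting holder/waiter
 *      // else cmpxchg in old + l[type].lock_val to take the lock
 *
 * and an unlock adds l[type].unlock_val and then wakes the
 * l[type].unlock_wakeup waitlist. This is only a sketch of the fast path;
 * the real logic lives in __do_six_trylock_type() and do_six_unlock_type().
 */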

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
                                 union six_lock_state old,
                                 struct task_struct *owner)
{
        if (type != SIX_LOCK_intent)
                return;

        if (!old.intent_lock) {
                EBUG_ON(lock->owner);
                lock->owner = owner;
        } else {
                EBUG_ON(lock->owner != current);
        }
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
        unsigned read_count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                read_count += *per_cpu_ptr(lock->readers, cpu);
        return read_count;
}

/* This is probably up there with the more evil things I've done */
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))

static int __do_six_trylock_type(struct six_lock *lock,
                                 enum six_lock_type type,
                                 struct task_struct *task,
                                 bool try)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old, new;
        int ret;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
        EBUG_ON(type == SIX_LOCK_write && (lock->state.seq & 1));
        EBUG_ON(type == SIX_LOCK_write && (try != !(lock->state.write_locking)));

        /*
         * Percpu reader mode:
         *
         * The basic idea behind this algorithm is that you can implement a lock
         * between two threads without any atomics, just memory barriers:
         *
         * For two threads you'll need two variables, one variable for "thread a
         * has the lock" and another for "thread b has the lock".
         *
         * To take the lock, a thread sets its variable indicating that it holds
         * the lock, then issues a full memory barrier, then reads from the
         * other thread's variable to check if the other thread thinks it has
         * the lock. If we raced, we back off and retry/sleep.
         */
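
        /*
         * A minimal sketch of that two-variable protocol, with hypothetical
         * per-thread flags (not variables from this file):
         *
         *      WRITE_ONCE(my_flag, 1);         // "I hold the lock"
         *      smp_mb();
         *      if (READ_ONCE(their_flag)) {    // do they also think they hold it?
         *              WRITE_ONCE(my_flag, 0); // back off, retry or sleep
         *              return false;
         *      }
         *      return true;                    // lock acquired
         *
         * Below, the percpu read counters play the part of "thread a has the
         * lock" and state.write_locking plays the part of "thread b has the
         * lock".
         */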

        if (type == SIX_LOCK_read && lock->readers) {
                preempt_disable();
                this_cpu_inc(*lock->readers); /* signal that we own lock */

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail);

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * If we failed because a writer was trying to take the
                 * lock, issue a wakeup because we might have caused a
                 * spurious trylock failure:
                 */
                if (old.write_locking)
                        ret = -1 - SIX_LOCK_write;
        } else if (type == SIX_LOCK_write && lock->readers) {
                if (try) {
                        atomic64_add(__SIX_VAL(write_locking, 1),
                                     &lock->state.counter);
                        smp_mb__after_atomic();
                }

                ret = !pcpu_read_count(lock);

                /*
                 * On success, we increment lock->seq; also we clear
                 * write_locking unless we failed from the lock path:
                 */
                v = 0;
                if (ret)
                        v += __SIX_VAL(seq, 1);
                if (ret || try)
                        v -= __SIX_VAL(write_locking, 1);

                if (!ret && !try && !(lock->state.waiters & (1 << SIX_LOCK_write)))
                        v += __SIX_VAL(waiters, 1 << SIX_LOCK_write);

                if (try && !ret) {
                        old.v = atomic64_add_return(v, &lock->state.counter);
                        if (old.waiters & (1 << SIX_LOCK_read))
                                ret = -1 - SIX_LOCK_read;
                } else {
                        atomic64_add(v, &lock->state.counter);
                }
        } else {
                v = READ_ONCE(lock->state.v);
                do {
                        new.v = old.v = v;

                        if (!(old.v & l[type].lock_fail)) {
                                new.v += l[type].lock_val;

                                if (type == SIX_LOCK_write)
                                        new.write_locking = 0;
                        } else if (!try && !(new.waiters & (1 << type)))
                                new.waiters |= 1 << type;
                        else
                                break; /* waiting bit already set */
                } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                        old.v, new.v)) != old.v);

                ret = !(old.v & l[type].lock_fail);

                EBUG_ON(ret && !(lock->state.v & l[type].held_mask));
        }

        if (ret > 0)
                six_set_owner(lock, type, old, task);

        EBUG_ON(type == SIX_LOCK_write && (try || ret > 0) && (lock->state.write_locking));

        return ret;
}
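
/*
 * Note the return convention of __do_six_trylock_type() for the callers
 * below: ret > 0 means the lock was taken, ret == 0 means it wasn't, and
 * ret < 0 means it wasn't taken *and* we may have caused a spurious trylock
 * failure for waiters of type -ret - 1, who must now be woken up:
 *
 *      ret = __do_six_trylock_type(lock, type, current, try);
 *      if (ret < 0)
 *              __six_lock_wakeup(lock, -ret - 1);      // decode the waitlist
 *      return ret > 0;
 *
 * which is exactly what do_six_trylock_type() does.
 */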

static inline void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
{
        struct six_lock_waiter *w, *next;
        struct task_struct *task;
        bool saw_one;
        int ret;
again:
        ret = 0;
        saw_one = false;
        raw_spin_lock(&lock->wait_lock);

        list_for_each_entry_safe(w, next, &lock->wait_list, list) {
                if (w->lock_want != lock_type)
                        continue;

                if (saw_one && lock_type != SIX_LOCK_read)
                        goto unlock;
                saw_one = true;

                ret = __do_six_trylock_type(lock, lock_type, w->task, false);
                if (ret <= 0)
                        goto unlock;

                __list_del(w->list.prev, w->list.next);
                task = w->task;
                /*
                 * Do no writes to @w besides setting lock_acquired - otherwise
                 * we would need a memory barrier:
                 */
                barrier();
                w->lock_acquired = true;
                wake_up_process(task);
        }

        clear_bit(waitlist_bitnr(lock_type), (unsigned long *) &lock->state.v);
unlock:
        raw_spin_unlock(&lock->wait_lock);

        if (ret < 0) {
                lock_type = -ret - 1;
                goto again;
        }
}

static inline void six_lock_wakeup(struct six_lock *lock,
                                   union six_lock_state state,
                                   enum six_lock_type lock_type)
{
        if (lock_type == SIX_LOCK_write && state.read_lock)
                return;

        if (!(state.waiters & (1 << lock_type)))
                return;

        __six_lock_wakeup(lock, lock_type);
}

static bool do_six_trylock_type(struct six_lock *lock,
                                enum six_lock_type type,
                                bool try)
{
        int ret;

        ret = __do_six_trylock_type(lock, type, current, try);
        if (ret < 0)
                __six_lock_wakeup(lock, -ret - 1);

        return ret > 0;
}

__always_inline __flatten
static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
        if (!do_six_trylock_type(lock, type, true))
                return false;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
        return true;
}

__always_inline __flatten
static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
                              unsigned seq)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write);

        if (type == SIX_LOCK_read &&
            lock->readers) {
                bool ret;

                preempt_disable();
                this_cpu_inc(*lock->readers);

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail) && old.seq == seq;

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * Similar to the lock path, we may have caused a spurious write
                 * lock fail and need to issue a wakeup:
                 */
                if (old.write_locking)
                        six_lock_wakeup(lock, old, SIX_LOCK_write);

                if (ret)
                        six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);

                return ret;
        }

        v = READ_ONCE(lock->state.v);
        do {
                old.v = v;

                if (old.seq != seq || old.v & l[type].lock_fail)
                        return false;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v,
                                old.v + l[type].lock_val)) != old.v);

        six_set_owner(lock, type, old, current);
        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read);
        return true;
}
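
/*
 * Example of the intended relock pattern, from the caller's side (a sketch;
 * reading state.seq directly is how callers such as bcachefs use this, not
 * something this file enforces):
 *
 *      six_lock_read(lock, NULL, NULL);
 *      u32 seq = lock->state.seq;      // even whenever not write locked
 *      six_unlock_read(lock);
 *      ...
 *      if (six_relock_read(lock, seq)) {
 *              // no write lock was taken in the meantime: previously
 *              // observed state is still valid and the read lock is
 *              // held again
 *      }
 */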

#ifdef CONFIG_LOCK_SPIN_ON_OWNER

static inline bool six_optimistic_spin(struct six_lock *lock,
                                       struct six_lock_waiter *wait)
{
        struct task_struct *owner, *task = current;

        switch (wait->lock_want) {
        case SIX_LOCK_read:
                break;
        case SIX_LOCK_intent:
                if (lock->wait_list.next != &wait->list)
                        return false;
                break;
        case SIX_LOCK_write:
                return false;
        }

        rcu_read_lock();
        owner = READ_ONCE(lock->owner);

        while (owner && lock->owner == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking lock->owner still matches owner. If that fails,
                 * owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /*
                 * Stop spinning if the lock was acquired, if the owner went
                 * off-cpu, or if we need to reschedule. Also stop if we're an
                 * RT task: we would live-lock because we'd never let the
                 * owner complete.
                 */
                if (wait->lock_acquired ||
                    !owner->on_cpu ||
                    rt_task(task) ||
                    need_resched())
                        break;

                cpu_relax();
        }
        rcu_read_unlock();

        return wait->lock_acquired;
}

#else /* CONFIG_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock,
                                       struct six_lock_waiter *wait)
{
        return false;
}

#endif

noinline
static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
                                    struct six_lock_waiter *wait,
                                    six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        union six_lock_state old;
        int ret = 0;

        if (type == SIX_LOCK_write) {
                EBUG_ON(lock->state.write_locking);
                atomic64_add(__SIX_VAL(write_locking, 1), &lock->state.counter);
                smp_mb__after_atomic();
        }

        lock_contended(&lock->dep_map, _RET_IP_);

        wait->task              = current;
        wait->lock_want         = type;
        wait->lock_acquired     = false;

        raw_spin_lock(&lock->wait_lock);
        if (!(lock->state.waiters & (1 << type)))
                set_bit(waitlist_bitnr(type), (unsigned long *) &lock->state.v);
        /*
         * Retry taking the lock after taking the waitlist lock, in case we
         * raced with an unlock:
         */
        ret = __do_six_trylock_type(lock, type, current, false);
        if (ret <= 0) {
                wait->start_time = local_clock();

                if (!list_empty(&lock->wait_list)) {
                        struct six_lock_waiter *last =
                                list_last_entry(&lock->wait_list,
                                        struct six_lock_waiter, list);

                        if (time_before_eq64(wait->start_time, last->start_time))
                                wait->start_time = last->start_time + 1;
                }

                list_add_tail(&wait->list, &lock->wait_list);
        }
        raw_spin_unlock(&lock->wait_lock);

        if (unlikely(ret > 0)) {
                ret = 0;
                goto out;
        }

        if (unlikely(ret < 0)) {
                __six_lock_wakeup(lock, -ret - 1);
                ret = 0;
        }

        if (six_optimistic_spin(lock, wait))
                goto out;

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (wait->lock_acquired)
                        break;

                ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
                if (unlikely(ret)) {
                        raw_spin_lock(&lock->wait_lock);
                        if (!wait->lock_acquired)
                                list_del(&wait->list);
                        raw_spin_unlock(&lock->wait_lock);

                        if (wait->lock_acquired)
                                do_six_unlock_type(lock, type);
                        break;
                }

                schedule();
        }

        __set_current_state(TASK_RUNNING);
out:
        if (ret && type == SIX_LOCK_write && lock->state.write_locking) {
                old.v = atomic64_sub_return(__SIX_VAL(write_locking, 1),
                                            &lock->state.counter);
                six_lock_wakeup(lock, old, SIX_LOCK_read);
        }

        return ret;
}
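
/*
 * Sketch of a should_sleep_fn: the callback is called before each sleep in
 * the slowpath above, and a nonzero return aborts the lock attempt and is
 * propagated out of six_lock_*(). The helper names here are hypothetical:
 *
 *      static int my_should_sleep(struct six_lock *lock, void *p)
 *      {
 *              struct my_transaction *trans = p;
 *
 *              return my_would_deadlock(trans, lock) ? -EDEADLK : 0;
 *      }
 *
 *      ret = six_lock_intent(lock, my_should_sleep, trans);
 *      if (ret)        // lock was not taken; the callback vetoed sleeping
 *              return ret;
 */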

__always_inline __flatten
static int __six_lock_type_waiter(struct six_lock *lock, enum six_lock_type type,
                         struct six_lock_waiter *wait,
                         six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        int ret;

        wait->start_time = 0;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

        ret = do_six_trylock_type(lock, type, true) ? 0
                : __six_lock_type_slowpath(lock, type, wait, should_sleep_fn, p);

        if (ret && type != SIX_LOCK_write)
                six_release(&lock->dep_map);
        if (!ret)
                lock_acquired(&lock->dep_map, _RET_IP_);

        return ret;
}

__always_inline
static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
                           six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        struct six_lock_waiter wait;

        return __six_lock_type_waiter(lock, type, &wait, should_sleep_fn, p);
}

__always_inline __flatten
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state state;

        if (type == SIX_LOCK_intent)
                lock->owner = NULL;

        if (type == SIX_LOCK_read &&
            lock->readers) {
                smp_mb(); /* unlock barrier */
                this_cpu_dec(*lock->readers);
                smp_mb(); /* between unlocking and checking for waiters */
                state.v = READ_ONCE(lock->state.v);
        } else {
                EBUG_ON(!(lock->state.v & l[type].held_mask));
                state.v = atomic64_add_return_release(l[type].unlock_val,
                                                      &lock->state.counter);
        }

        six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}

__always_inline __flatten
static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
        EBUG_ON(type == SIX_LOCK_write &&
                !(lock->state.v & __SIX_LOCK_HELD_intent));
        EBUG_ON((type == SIX_LOCK_write ||
                 type == SIX_LOCK_intent) &&
                lock->owner != current);

        if (type != SIX_LOCK_write)
                six_release(&lock->dep_map);

        if (type == SIX_LOCK_intent &&
            lock->intent_lock_recurse) {
                --lock->intent_lock_recurse;
                return;
        }

        do_six_unlock_type(lock, type);
}

#define __SIX_LOCK(type)                                                \
bool six_trylock_##type(struct six_lock *lock)                          \
{                                                                       \
        return __six_trylock_type(lock, SIX_LOCK_##type);               \
}                                                                       \
EXPORT_SYMBOL_GPL(six_trylock_##type);                                  \
                                                                        \
bool six_relock_##type(struct six_lock *lock, u32 seq)                  \
{                                                                       \
        return __six_relock_type(lock, SIX_LOCK_##type, seq);           \
}                                                                       \
EXPORT_SYMBOL_GPL(six_relock_##type);                                   \
                                                                        \
int six_lock_##type(struct six_lock *lock,                              \
                    six_lock_should_sleep_fn should_sleep_fn, void *p)  \
{                                                                       \
        return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
}                                                                       \
EXPORT_SYMBOL_GPL(six_lock_##type);                                     \
                                                                        \
int six_lock_waiter_##type(struct six_lock *lock,                       \
                           struct six_lock_waiter *wait,                \
                           six_lock_should_sleep_fn should_sleep_fn, void *p)\
{                                                                       \
        return __six_lock_type_waiter(lock, SIX_LOCK_##type, wait, should_sleep_fn, p);\
}                                                                       \
EXPORT_SYMBOL_GPL(six_lock_waiter_##type);                              \
                                                                        \
void six_unlock_##type(struct six_lock *lock)                           \
{                                                                       \
        __six_unlock_type(lock, SIX_LOCK_##type);                       \
}                                                                       \
EXPORT_SYMBOL_GPL(six_unlock_##type);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)

#undef __SIX_LOCK
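
/*
 * The macro above generates the public entry points: six_trylock_{read,
 * intent,write}(), six_relock_*(), six_lock_*(), six_lock_waiter_*() and
 * six_unlock_*(). A typical full lock sequence looks roughly like this
 * (should_sleep_fn omitted):
 *
 *      six_lock_intent(lock, NULL, NULL);      // excludes other intent holders
 *      six_lock_write(lock, NULL, NULL);       // requires intent to be held
 *      ... modify the protected structure ...
 *      six_unlock_write(lock);
 *      six_unlock_intent(lock);                // readers were only excluded
 *                                              // while the write lock was held
 */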

/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
        six_lock_increment(lock, SIX_LOCK_read);
        six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);

bool six_lock_tryupgrade(struct six_lock *lock)
{
        union six_lock_state old, new;
        u64 v = READ_ONCE(lock->state.v);

        do {
                new.v = old.v = v;

                if (new.intent_lock)
                        return false;

                if (!lock->readers) {
                        EBUG_ON(!new.read_lock);
                        new.read_lock--;
                }

                new.intent_lock = 1;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v, new.v)) != old.v);

        if (lock->readers)
                this_cpu_dec(*lock->readers);

        six_set_owner(lock, SIX_LOCK_intent, old, current);

        return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
                         enum six_lock_type from,
                         enum six_lock_type to)
{
        EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

        if (to == from)
                return true;

        if (to == SIX_LOCK_read) {
                six_lock_downgrade(lock);
                return true;
        } else {
                return six_lock_tryupgrade(lock);
        }
}
EXPORT_SYMBOL_GPL(six_trylock_convert);
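
/*
 * Example of using the conversion helpers: a caller holding a read lock can
 * try to upgrade it in place, falling back to dropping the lock and taking
 * intent the slow way (a sketch, error handling omitted):
 *
 *      if (!six_lock_tryupgrade(lock)) {
 *              six_unlock_read(lock);
 *              six_lock_intent(lock, NULL, NULL);      // may block
 *      }
 *      ...
 *      six_lock_downgrade(lock);       // back to read, without ever
 *                                      // dropping the lock
 */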

/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;

        six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read);

        /* XXX: assert already locked, and that we don't overflow: */

        switch (type) {
        case SIX_LOCK_read:
                if (lock->readers) {
                        this_cpu_inc(*lock->readers);
                } else {
                        EBUG_ON(!lock->state.read_lock &&
                                !lock->state.intent_lock);
                        atomic64_add(l[type].lock_val, &lock->state.counter);
                }
                break;
        case SIX_LOCK_intent:
                EBUG_ON(!lock->state.intent_lock);
                lock->intent_lock_recurse++;
                break;
        case SIX_LOCK_write:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(six_lock_increment);

void six_lock_wakeup_all(struct six_lock *lock)
{
        union six_lock_state state = lock->state;
        struct six_lock_waiter *w;

        six_lock_wakeup(lock, state, SIX_LOCK_read);
        six_lock_wakeup(lock, state, SIX_LOCK_intent);
        six_lock_wakeup(lock, state, SIX_LOCK_write);

        raw_spin_lock(&lock->wait_lock);
        list_for_each_entry(w, &lock->wait_list, list)
                wake_up_process(w->task);
        raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

void six_lock_pcpu_free(struct six_lock *lock)
{
        BUG_ON(lock->readers && pcpu_read_count(lock));
        BUG_ON(lock->state.read_lock);

        free_percpu(lock->readers);
        lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free);

void six_lock_pcpu_alloc(struct six_lock *lock)
{
#ifdef __KERNEL__
        if (!lock->readers)
                lock->readers = alloc_percpu(unsigned);
#endif
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);
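
/*
 * Sketch of opting a lock into percpu reader mode (initialization itself
 * lives in six.h, not in this file):
 *
 *      six_lock_init(&foo->lock);
 *      six_lock_pcpu_alloc(&foo->lock);        // readers now counted per cpu
 *      ...
 *      six_lock_pcpu_free(&foo->lock);         // must not be read locked
 *
 * Percpu readers trade cheaper, uncontended read lock/unlock for a more
 * expensive write lock, which has to sum every CPU's counter in
 * pcpu_read_count().
 */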

/*
 * Returns lock held counts, for read, intent and write:
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
{
        struct six_lock_count ret;

        ret.n[SIX_LOCK_read]    = 0;
        ret.n[SIX_LOCK_intent]  = lock->state.intent_lock + lock->intent_lock_recurse;
        ret.n[SIX_LOCK_write]   = lock->state.seq & 1;

        if (!lock->readers)
                ret.n[SIX_LOCK_read] += lock->state.read_lock;
        else {
                int cpu;

                for_each_possible_cpu(cpu)
                        ret.n[SIX_LOCK_read] += *per_cpu_ptr(lock->readers, cpu);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(six_lock_counts);